/*
 * Copyright IBM Corp. 2007, 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <linux/platform_device.h>
#include <asm/chpid.h>
#include <asm/sclp.h>
#include <asm/setup.h>
#include <asm/ctl_reg.h>

#include "sclp.h"

#define SCLP_CMDW_READ_SCP_INFO		0x00020001
#define SCLP_CMDW_READ_SCP_INFO_FORCED	0x00120001

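/*
 * Layout of the READ SCP INFO response. The trailing comments give byte
 * offsets within the 4KB SCCB. rnmax/rnsize are the original narrow
 * fields; rnmax2/rnsize2 are their wider successors, used when the old
 * fields read as zero (see sclp_facilities_detect()).
 */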
struct read_info_sccb {
	struct	sccb_header header;	/* 0-7 */
	u16	rnmax;			/* 8-9 */
	u8	rnsize;			/* 10 */
	u8	_reserved0[24 - 11];	/* 11-23 */
	u8	loadparm[8];		/* 24-31 */
	u8	_reserved1[48 - 32];	/* 32-47 */
	u64	facilities;		/* 48-55 */
	u8	_reserved2[84 - 56];	/* 56-83 */
	u8	fac84;			/* 84 */
	u8	_reserved3[91 - 85];	/* 85-90 */
	u8	flags;			/* 91 */
	u8	_reserved4[100 - 92];	/* 92-99 */
	u32	rnsize2;		/* 100-103 */
	u64	rnmax2;			/* 104-111 */
	u8	_reserved5[4096 - 112];	/* 112-4095 */
} __attribute__((packed, aligned(PAGE_SIZE)));

static struct read_info_sccb __initdata early_read_info_sccb;
static int __initdata early_read_info_sccb_valid;

u64 sclp_facilities;
static u8 sclp_fac84;
static unsigned long long rzm;
static unsigned long long rnmax;

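/*
 * Issue an SCLP command before the interrupt machinery in sclp.c is
 * usable: enable the service-signal external interrupt subclass
 * (control register 0, bit 9), start the service call and wait for the
 * completion interrupt in an enabled wait PSW.
 */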
static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
{
	int rc;

	__ctl_set_bit(0, 9);
	rc = sclp_service_call(cmd, sccb);
	if (rc)
		goto out;
	__load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA |
			PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT);
	local_irq_disable();
out:
	/* Contents of the sccb might have changed. */
	barrier();
	__ctl_clear_bit(0, 9);
	return rc;
}

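/*
 * Try READ SCP INFO FORCED first and fall back to plain READ SCP INFO.
 * -EBUSY from the service call is retried; response code 0x10 marks a
 * valid SCCB, while any response other than 0x1f0 (presumably an
 * unsupported command) also stops the fallback.
 */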
static void __init sclp_read_info_early(void)
{
	int rc;
	int i;
	struct read_info_sccb *sccb;
	sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
				  SCLP_CMDW_READ_SCP_INFO};

	sccb = &early_read_info_sccb;
	for (i = 0; i < ARRAY_SIZE(commands); i++) {
		do {
			memset(sccb, 0, sizeof(*sccb));
			sccb->header.length = sizeof(*sccb);
			sccb->header.function_code = 0x80;
			sccb->header.control_mask[2] = 0x80;
			rc = sclp_cmd_sync_early(commands[i], sccb);
		} while (rc == -EBUSY);

		if (rc)
			break;
		if (sccb->header.response_code == 0x10) {
			early_read_info_sccb_valid = 1;
			break;
		}
		if (sccb->header.response_code != 0x1f0)
			break;
	}
}

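/*
 * Cache the facility bits and the memory increment geometry from the
 * early SCCB. A zero rnmax or rnsize means the value did not fit into
 * the original field and must be taken from rnmax2/rnsize2 instead;
 * rnsize is reported in megabytes, hence the shift by 20.
 */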
void __init sclp_facilities_detect(void)
{
	struct read_info_sccb *sccb;

	sclp_read_info_early();
	if (!early_read_info_sccb_valid)
		return;

	sccb = &early_read_info_sccb;
	sclp_facilities = sccb->facilities;
	sclp_fac84 = sccb->fac84;
	rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
	rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
	rzm <<= 20;
}

unsigned long long sclp_get_rnmax(void)
{
	return rnmax;
}

unsigned long long sclp_get_rzm(void)
{
	return rzm;
}

/*
 * This function is called after sclp_facilities_detect(), which is invoked
 * from early.c, so the sccb should already contain valid data.
 */
void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
{
	struct read_info_sccb *sccb;

	if (!early_read_info_sccb_valid)
		return;
	sccb = &early_read_info_sccb;
	info->is_valid = 1;
	if (sccb->flags & 0x2)
		info->has_dump = 1;
	memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
}

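/*
 * Synchronous wrapper around the asynchronous sclp_add_request()
 * interface: the completion is signalled from the request callback once
 * the request has been processed.
 */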
static void sclp_sync_callback(struct sclp_req *req, void *data)
{
	struct completion *completion = data;

	complete(completion);
}

static int do_sync_request(sclp_cmdw_t cmd, void *sccb)
{
	struct completion completion;
	struct sclp_req *request;
	int rc;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	request->command = cmd;
	request->sccb = sccb;
	request->status = SCLP_REQ_FILLED;
	request->callback = sclp_sync_callback;
	request->callback_data = &completion;
	init_completion(&completion);

	/* Perform sclp request. */
	rc = sclp_add_request(request);
	if (rc)
		goto out;
	wait_for_completion(&completion);

	/* Check response. */
	if (request->status != SCLP_REQ_DONE) {
		pr_warning("sync request failed (cmd=0x%08x, "
			   "status=0x%02x)\n", cmd, request->status);
		rc = -EIO;
	}
out:
	kfree(request);
	return rc;
}

/*
 * CPU configuration related functions.
 */

#define SCLP_CMDW_READ_CPU_INFO		0x00010001
#define SCLP_CMDW_CONFIGURE_CPU		0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU	0x00100001

struct read_cpu_info_sccb {
	struct	sccb_header header;
	u16	nr_configured;
	u16	offset_configured;
	u16	nr_standby;
	u16	offset_standby;
	u8	reserved[4096 - 16];
} __attribute__((packed, aligned(PAGE_SIZE)));

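/*
 * Note: the copy below assumes that the standby CPU entries immediately
 * follow the configured ones in the SCCB, so a single memcpy starting at
 * offset_configured covers all info->combined entries.
 */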
static void sclp_fill_cpu_info(struct sclp_cpu_info *info,
			       struct read_cpu_info_sccb *sccb)
{
	char *page = (char *) sccb;

	memset(info, 0, sizeof(*info));
	info->configured = sccb->nr_configured;
	info->standby = sccb->nr_standby;
	info->combined = sccb->nr_configured + sccb->nr_standby;
	info->has_cpu_type = sclp_fac84 & 0x1;
	memcpy(&info->cpu, page + sccb->offset_configured,
	       info->combined * sizeof(struct sclp_cpu_entry));
}

int sclp_get_cpu_info(struct sclp_cpu_info *info)
{
	int rc;
	struct read_cpu_info_sccb *sccb;

	if (!SCLP_HAS_CPU_INFO)
		return -EOPNOTSUPP;
	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warning("readcpuinfo failed (response=0x%04x)\n",
			   sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	sclp_fill_cpu_info(info, sccb);
out:
	free_page((unsigned long) sccb);
	return rc;
}

struct cpu_configure_sccb {
	struct sccb_header header;
} __attribute__((packed, aligned(8)));

static int do_cpu_configure(sclp_cmdw_t cmd)
{
	struct cpu_configure_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CPU_RECONFIG)
		return -EOPNOTSUPP;
	/*
	 * This is not going to cross a page boundary since we force
	 * kmalloc to have a minimum alignment of 8 bytes on s390.
	 */
	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warning("configure cpu failed (cmd=0x%08x, "
			   "response=0x%04x)\n", cmd,
			   sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	kfree(sccb);
	return rc;
}

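/*
 * The target CPU address travels in the second byte of the command word
 * (cmd | cpu << 8); the same modifier-byte scheme is used for storage
 * element and channel-path IDs elsewhere in this file.
 */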
int sclp_cpu_configure(u8 cpu)
{
	return do_cpu_configure(SCLP_CMDW_CONFIGURE_CPU | cpu << 8);
}

int sclp_cpu_deconfigure(u8 cpu)
{
	return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
}

#ifdef CONFIG_MEMORY_HOTPLUG

static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static unsigned long sclp_storage_ids[256 / BITS_PER_LONG];
static int sclp_mem_state_changed;

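/*
 * Standby memory is managed in increments of rzm bytes, numbered from 1
 * (see rn2addr()). sclp_mem_list keeps one memory_increment per rn,
 * sorted by increment number.
 */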
struct memory_increment {
	struct list_head list;
	u16 rn;
	int standby;
	int usecount;
};

struct assign_storage_sccb {
	struct sccb_header header;
	u16 rn;
} __packed;

int arch_get_memory_phys_device(unsigned long start_pfn)
{
	if (!rzm)
		return 0;
	return PFN_PHYS(start_pfn) >> ilog2(rzm);
}

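/* Increment numbers are 1-based: increment rn starts at (rn - 1) * rzm. */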
static unsigned long long rn2addr(u16 rn)
{
	return (unsigned long long) (rn - 1) * rzm;
}

static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
	struct assign_storage_sccb *sccb;
	int rc;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->rn = rn;
	rc = do_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warning("assign storage failed (cmd=0x%08x, "
			   "response=0x%04x, rn=0x%04x)\n", cmd,
			   sccb->header.response_code, rn);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

static int sclp_assign_storage(u16 rn)
{
	return do_assign_storage(0x000d0001, rn);
}

static int sclp_unassign_storage(u16 rn)
{
	return do_assign_storage(0x000c0001, rn);
}

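/*
 * Each entry in the attach/read-storage responses carries the increment
 * number in its upper 16 bits, which is why the users below shift the
 * entries right by 16.
 */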
struct attach_storage_sccb {
	struct sccb_header header;
	u16 :16;
	u16 assigned;
	u32 :32;
	u32 entries[0];
} __packed;

static int sclp_attach_storage(u8 id)
{
	struct attach_storage_sccb *sccb;
	int rc;
	int i;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	rc = do_sync_request(0x00080001 | id << 8, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
		set_bit(id, sclp_storage_ids);
		for (i = 0; i < sccb->assigned; i++) {
			if (sccb->entries[i])
				sclp_unassign_storage(sccb->entries[i] >> 16);
		}
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

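/*
 * Walk the sorted increment list and assign or unassign every increment
 * that overlaps [start, start + size). Since the list is ordered by rn,
 * the walk can stop at the first increment past the end of the range.
 */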
static int sclp_mem_change_state(unsigned long start, unsigned long size,
				 int online)
{
	struct memory_increment *incr;
	unsigned long long istart;
	int rc = 0;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (start + size - 1 < istart)
			break;
		if (start > istart + rzm - 1)
			continue;
		if (online) {
			if (incr->usecount++)
				continue;
			/*
			 * Don't break the loop if one assign fails. The loop
			 * may be walked again on CANCEL and there is no way
			 * to tell which increments changed state earlier.
			 * So continue and increase the usecount for all
			 * increments.
			 */
			rc |= sclp_assign_storage(incr->rn);
		} else {
			if (--incr->usecount)
				continue;
			sclp_unassign_storage(incr->rn);
		}
	}
	return rc ? -EIO : 0;
}

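/*
 * Memory hotplug notifier: storage elements that are not attached yet
 * are (re)attached first, then GOING_ONLINE assigns the affected
 * increments while CANCEL_ONLINE and OFFLINE unassign them again.
 */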
static int sclp_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	unsigned long start, size;
	struct memory_notify *arg;
	unsigned char id;
	int rc = 0;

	arg = data;
	start = arg->start_pfn << PAGE_SHIFT;
	size = arg->nr_pages << PAGE_SHIFT;
	mutex_lock(&sclp_mem_mutex);
	for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
		sclp_attach_storage(id);
	switch (action) {
	case MEM_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	case MEM_GOING_ONLINE:
		rc = sclp_mem_change_state(start, size, 1);
		break;
	case MEM_CANCEL_ONLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	case MEM_OFFLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	if (!rc)
		sclp_mem_state_changed = 1;
	mutex_unlock(&sclp_mem_mutex);
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}

static struct notifier_block sclp_mem_nb = {
	.notifier_call = sclp_mem_notifier,
};

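/*
 * Coalesce runs of consecutive standby increments into a single
 * add_memory() call, clipped against VMEM_MAX_PHYS and against a
 * memory_end limit (e.g. from mem=). A call with rn == 0 flushes the
 * pending range (see sclp_add_standby_memory()).
 */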
static void __init add_memory_merged(u16 rn)
{
	static u16 first_rn, num;
	unsigned long long start, size;

	if (rn && first_rn && (first_rn + num == rn)) {
		num++;
		return;
	}
	if (!first_rn)
		goto skip_add;
	start = rn2addr(first_rn);
	size = (unsigned long long) num * rzm;
	if (start >= VMEM_MAX_PHYS)
		goto skip_add;
	if (start + size > VMEM_MAX_PHYS)
		size = VMEM_MAX_PHYS - start;
	if (memory_end_set && (start >= memory_end))
		goto skip_add;
	if (memory_end_set && (start + size > memory_end))
		size = memory_end - start;
	add_memory(0, start, size);
skip_add:
	first_rn = rn;
	num = 1;
}

static void __init sclp_add_standby_memory(void)
{
	struct memory_increment *incr;

	list_for_each_entry(incr, &sclp_mem_list, list)
		if (incr->standby)
			add_memory_merged(incr->rn);
	add_memory_merged(0);
}

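/*
 * Insert an increment into the sorted list. Assigned increments are
 * filed by their reported rn; unassigned standby increments get the
 * first free increment number, i.e. the first gap in the assigned
 * sequence.
 */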
static void __init insert_increment(u16 rn, int standby, int assigned)
{
	struct memory_increment *incr, *new_incr;
	struct list_head *prev;
	u16 last_rn;

	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
	if (!new_incr)
		return;
	new_incr->rn = rn;
	new_incr->standby = standby;
	if (!standby)
		new_incr->usecount = 1;
	last_rn = 0;
	prev = &sclp_mem_list;
	list_for_each_entry(incr, &sclp_mem_list, list) {
		if (assigned && incr->rn > rn)
			break;
		if (!assigned && incr->rn - last_rn > 1)
			break;
		last_rn = incr->rn;
		prev = &incr->list;
	}
	if (!assigned)
		new_incr->rn = last_rn + 1;
	if (new_incr->rn > rnmax) {
		kfree(new_incr);
		return;
	}
	list_add(&new_incr->list, prev);
}

static int sclp_mem_freeze(struct device *dev)
{
	if (!sclp_mem_state_changed)
		return 0;
	pr_err("Memory hotplug state changed, suspend refused.\n");
	return -EPERM;
}

struct read_storage_sccb {
	struct sccb_header header;
	u16 max_id;
	u16 assigned;
	u16 standby;
	u16 :16;
	u32 entries[0];
} __packed;

static const struct dev_pm_ops sclp_mem_pm_ops = {
	.freeze		= sclp_mem_freeze,
};

static struct platform_driver sclp_mem_pdrv = {
	.driver = {
		.name	= "sclp_mem",
		.pm	= &sclp_mem_pm_ops,
	},
};

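/*
 * Probe each storage element ID for assigned (response 0x0010) and
 * standby (response 0x0410) increments and build sclp_mem_list from the
 * results; 0x0310 is treated as "no storage" for that ID. The facility
 * mask 0xe00000000000ULL presumably covers the SCLP storage
 * reconfiguration facilities required for this to work.
 */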
static int __init sclp_detect_standby_memory(void)
{
	struct platform_device *sclp_pdev;
	struct read_storage_sccb *sccb;
	int i, id, assigned, rc;

	if (!early_read_info_sccb_valid)
		return 0;
	if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
		return 0;
	rc = -ENOMEM;
	sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		goto out;
	assigned = 0;
	for (id = 0; id <= sclp_max_storage_id; id++) {
		memset(sccb, 0, PAGE_SIZE);
		sccb->header.length = PAGE_SIZE;
		rc = do_sync_request(0x00040001 | id << 8, sccb);
		if (rc)
			goto out;
		switch (sccb->header.response_code) {
		case 0x0010:
			set_bit(id, sclp_storage_ids);
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 0, 1);
			}
			break;
		case 0x0310:
			break;
		case 0x0410:
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 1, 1);
			}
			break;
		default:
			rc = -EIO;
			break;
		}
		if (!rc)
			sclp_max_storage_id = sccb->max_id;
	}
	if (rc || list_empty(&sclp_mem_list))
		goto out;
	for (i = 1; i <= rnmax - assigned; i++)
		insert_increment(0, 1, 0);
	rc = register_memory_notifier(&sclp_mem_nb);
	if (rc)
		goto out;
	rc = platform_driver_register(&sclp_mem_pdrv);
	if (rc)
		goto out;
	sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0);
	rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
	if (rc)
		goto out_driver;
	sclp_add_standby_memory();
	goto out;
out_driver:
	platform_driver_unregister(&sclp_mem_pdrv);
out:
	free_page((unsigned long) sccb);
	return rc;
}
__initcall(sclp_detect_standby_memory);

#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * Channel path configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CHPATH		0x000f0001
#define SCLP_CMDW_DECONFIGURE_CHPATH		0x000e0001
#define SCLP_CMDW_READ_CHPATH_INFORMATION	0x00030001

struct chp_cfg_sccb {
	struct sccb_header header;
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

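/*
 * Response codes 0x0020 and 0x0120 report successful completion;
 * 0x0440 and 0x0450 are also accepted below, presumably indicating that
 * the channel path is already in the requested configuration state.
 */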
static int do_chp_configure(sclp_cmdw_t cmd)
{
	struct chp_cfg_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_RECONFIG)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
	case 0x0440:
	case 0x0450:
		break;
	default:
		pr_warning("configure channel-path failed "
			   "(cmd=0x%08x, response=0x%04x)\n", cmd,
			   sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

/**
 * sclp_chp_configure - perform configure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform configure channel-path sclp command for the specified chpid and
 * wait for completion. Return 0 on success, non-zero otherwise.
 */
int sclp_chp_configure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
}

/**
 * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform deconfigure channel-path sclp command for the specified chpid and
 * wait for completion. Return 0 on success, non-zero otherwise.
 */
int sclp_chp_deconfigure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}

struct chp_info_sccb {
	struct sccb_header header;
	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
	u8 standby[SCLP_CHP_INFO_MASK_SIZE];
	u8 configured[SCLP_CHP_INFO_MASK_SIZE];
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

/**
 * sclp_chp_read_info - perform read channel-path information sclp command
 * @info: resulting channel-path information data
 *
 * Perform read channel-path information sclp command and wait for completion.
 * On success, store channel-path information in @info and return 0. Return
 * non-zero otherwise.
 */
int sclp_chp_read_info(struct sclp_chp_info *info)
{
	struct chp_info_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_INFO)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warning("read channel-path info failed "
			   "(response=0x%04x)\n", sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
	free_page((unsigned long) sccb);
	return rc;
}
