chsc.c revision 231caa1cac555bdd5cb64bf91b554b38e8ddf715
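
/*
 * Every CHSC command in this file follows one pattern: the request is
 * built in a zeroed 4K DMA page that begins with a struct chsc_header
 * (block length and command code), is issued via the chsc() wrapper, and
 * the response code is then decoded, 0x0001 meaning success.  A minimal
 * sketch of that shape (the field names below are illustrative only, not
 * taken from the cio headers):
 *
 *	struct {
 *		struct chsc_header request;   // .length, .code of request
 *		// command-dependent request fields
 *		struct chsc_header response;  // .code == 0x0001 on success
 *		// command-dependent response data
 *	} *cmd_area = page;
 */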
/*
 * drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *   Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *			     IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"

static void *sei_page;

static int new_channel_path(int chpid);

static inline void
set_chp_logically_online(int chp, int onoff)
{
	css[0]->chps[chp]->state = onoff;
}

static int
get_chp_status(int chp)
{
	return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV);
}

void
chsc_validate_chpids(struct subchannel *sch)
{
	int mask, chp;

	for (chp = 0; chp <= 7; chp++) {
		mask = 0x80 >> chp;
		if (!get_chp_status(sch->schib.pmcw.chpid[chp]))
			/* disable using this path */
			sch->opm &= ~mask;
	}
}

void
chpid_is_actually_online(int chp)
{
	int state;

	state = get_chp_status(chp);
	if (state < 0) {
		need_rescan = 1;
		queue_work(slow_path_wq, &slow_path_work);
	} else
		WARN_ON(!state);
}

/* FIXME: this is _always_ called for every subchannel. shouldn't we
 *	  process more than one at a time? */
static int
chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
{
	int ccode, j;

	struct {
		struct chsc_header request;
		u16 reserved1a:10;
		u16 ssid:2;
		u16 reserved1b:4;
		u16 f_sch;	  /* first subchannel */
		u16 reserved2;
		u16 l_sch;	  /* last subchannel */
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u8 sch_valid : 1;
		u8 dev_valid : 1;
		u8 st	     : 3; /* subchannel type */
		u8 zeroes    : 3;
		u8 unit_addr;	  /* unit address */
		u16 devno;	  /* device number */
		u8 path_mask;
		u8 fla_valid_mask;
		u16 sch;	  /* subchannel */
		u8 chpid[8];	  /* chpids 0-7 */
		u16 fla[8];	  /* full link addresses 0-7 */
	} *ssd_area;

	ssd_area = page;

	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;

	ssd_area->ssid = sch->schid.ssid;
	ssd_area->f_sch = sch->schid.sch_no;
	ssd_area->l_sch = sch->schid.sch_no;

	ccode = chsc(ssd_area);
	if (ccode > 0) {
		pr_debug("chsc returned with ccode = %d\n", ccode);
		return (ccode == 3) ? -ENODEV : -EBUSY;
	}

	switch (ssd_area->response.code) {
	case 0x0001: /* everything ok */
		break;
	case 0x0002:
		CIO_CRW_EVENT(2, "Invalid command!\n");
		return -EINVAL;
	case 0x0003:
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		return -EINVAL;
	case 0x0004:
		CIO_CRW_EVENT(2, "Model does not provide ssd\n");
		return -EOPNOTSUPP;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      ssd_area->response.code);
		return -EIO;
	}

	/*
	 * ssd_area->st stores the type of the detected
	 * subchannel, with the following definitions:
	 *
	 * 0: I/O subchannel:	  All fields have meaning
	 * 1: CHSC subchannel:	  Only sch_val, st and sch
	 *			  have meaning
	 * 2: Message subchannel: All fields except unit_addr
	 *			  have meaning
	 * 3: ADM subchannel:	  Only sch_val, st and sch
	 *			  have meaning
	 *
	 * Other types are currently undefined.
	 */
	if (ssd_area->st > 3) { /* uhm, that looks strange... */
		CIO_CRW_EVENT(0, "Strange subchannel type %d"
			      " for sch 0.%x.%04x\n", ssd_area->st,
			      sch->schid.ssid, sch->schid.sch_no);
		/*
		 * There may have been a new subchannel type defined in the
		 * time since this code was written; since we don't know which
		 * fields have meaning and what to do with it we just jump out
		 */
		return 0;
	} else {
		const char *type[4] = {"I/O", "chsc", "message", "ADM"};
		CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
			      sch->schid.ssid, sch->schid.sch_no,
			      type[ssd_area->st]);

		sch->ssd_info.valid = 1;
		sch->ssd_info.type = ssd_area->st;
	}

	if (ssd_area->st == 0 || ssd_area->st == 2) {
		for (j = 0; j < 8; j++) {
			if (!((0x80 >> j) & ssd_area->path_mask &
			      ssd_area->fla_valid_mask))
				continue;
			sch->ssd_info.chpid[j] = ssd_area->chpid[j];
			sch->ssd_info.fla[j] = ssd_area->fla[j];
		}
	}
	return 0;
}
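
/*
 * Note on the mask arithmetic above and throughout this file: channel
 * paths are numbered 0-7 from the most significant bit of an 8-bit path
 * mask, so position j corresponds to mask 0x80 >> j.  For example (values
 * illustrative), pim == 0x80 means only the path in chpid[0] is
 * installed, and pim == 0xc0 means chpid[0] and chpid[1].
 */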

int
css_get_ssd_info(struct subchannel *sch)
{
	int ret;
	void *page;

	page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;
	spin_lock_irq(&sch->lock);
	ret = chsc_get_sch_desc_irq(sch, page);
	if (ret) {
		static int cio_chsc_err_msg;

		if (!cio_chsc_err_msg) {
			printk(KERN_ERR
			       "chsc_get_sch_descriptions:"
			       " Error %d while doing chsc; "
			       "processing some machine checks may "
			       "not work\n", ret);
			cio_chsc_err_msg = 1;
		}
	}
	spin_unlock_irq(&sch->lock);
	free_page((unsigned long)page);
	if (!ret) {
		int j, chpid;
		/* Allocate channel path structures, if needed. */
		for (j = 0; j < 8; j++) {
			chpid = sch->ssd_info.chpid[j];
			if (chpid && (get_chp_status(chpid) < 0))
				new_channel_path(chpid);
		}
	}
	return ret;
}

static int
s390_subchannel_remove_chpid(struct device *dev, void *data)
{
	int j;
	int mask;
	struct subchannel *sch;
	struct channel_path *chpid;
	struct schib schib;

	sch = to_subchannel(dev);
	chpid = data;
	for (j = 0; j < 8; j++)
		if (sch->schib.pmcw.chpid[j] == chpid->id)
			break;
	if (j >= 8)
		return 0;

	mask = 0x80 >> j;
	spin_lock_irq(&sch->lock);

	stsch(sch->schid, &schib);
	if (!schib.pmcw.dnv)
		goto out_unreg;
	memcpy(&sch->schib, &schib, sizeof(struct schib));
	/* Check for single path devices. */
	if (sch->schib.pmcw.pim == 0x80)
		goto out_unreg;
	if (sch->vpm == mask)
		goto out_unreg;

	if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
	    (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
	    (sch->schib.pmcw.lpum == mask) &&
	    (sch->vpm == 0)) {
		int cc;

		cc = cio_clear(sch);
		if (cc == -ENODEV)
			goto out_unreg;
		/* Call handler. */
		if (sch->driver && sch->driver->termination)
			sch->driver->termination(&sch->dev);
		goto out_unlock;
	}

	/* trigger path verification. */
	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);
out_unlock:
	spin_unlock_irq(&sch->lock);
	return 0;
out_unreg:
	spin_unlock_irq(&sch->lock);
	sch->lpm = 0;
	if (css_enqueue_subchannel_slow(sch->schid)) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
	}
	return 0;
}
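
/*
 * Summary of the tricky branch in the helper above: if a device is
 * device- and subchannel-active on exactly the path being removed
 * (lpum == mask) and has no verified paths left (vpm == 0), its I/O is
 * killed with cio_clear() and the driver's termination handler is
 * called; every other multipath case merely triggers path verification.
 */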

static inline void
s390_set_chpid_offline(__u8 chpid)
{
	char dbf_txt[15];
	struct device *dev;

	sprintf(dbf_txt, "chpr%x", chpid);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (get_chp_status(chpid) <= 0)
		return;
	dev = get_device(&css[0]->chps[chpid]->dev);
	bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
			 s390_subchannel_remove_chpid);

	if (need_rescan || css_slow_subchannels_exist())
		queue_work(slow_path_wq, &slow_path_work);
	put_device(dev);
}

struct res_acc_data {
	struct channel_path *chp;
	u32 fla_mask;
	u16 fla;
};

static int
s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
{
	int found;
	int chp;
	int ccode;

	found = 0;
	for (chp = 0; chp <= 7; chp++)
		/*
		 * check if chpid is in information updated by ssd
		 */
		if (sch->ssd_info.valid &&
		    sch->ssd_info.chpid[chp] == res_data->chp->id &&
		    (sch->ssd_info.fla[chp] & res_data->fla_mask)
		    == res_data->fla) {
			found = 1;
			break;
		}

	if (found == 0)
		return 0;

	/*
	 * Do a stsch to update our subchannel structure with the
	 * new path information and possibly check for logically
	 * offline chpids.
	 */
	ccode = stsch(sch->schid, &sch->schib);
	if (ccode > 0)
		return 0;

	return 0x80 >> chp;
}

static inline int
s390_process_res_acc_new_sch(struct subchannel_id schid)
{
	struct schib schib;
	int ret;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return need_rescan ? -EAGAIN : -ENXIO;

	/* Put it on the slow path. */
	ret = css_enqueue_subchannel_slow(schid);
	if (ret) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
		return -EAGAIN;
	}
	return 0;
}

static int
__s390_process_res_acc(struct subchannel_id schid, void *data)
{
	int chp_mask, old_lpm;
	struct res_acc_data *res_data;
	struct subchannel *sch;

	res_data = (struct res_acc_data *)data;
	sch = get_subchannel_by_schid(schid);
	if (!sch)
		/* Check if a subchannel is newly available. */
		return s390_process_res_acc_new_sch(schid);

	spin_lock_irq(&sch->lock);

	chp_mask = s390_process_res_acc_sch(res_data, sch);

	if (chp_mask == 0) {
		spin_unlock_irq(&sch->lock);
		return 0;
	}
	old_lpm = sch->lpm;
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | chp_mask) & sch->opm;
	if (!old_lpm && sch->lpm)
		device_trigger_reprobe(sch);
	else if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);

	spin_unlock_irq(&sch->lock);
	put_device(&sch->dev);
	return (res_data->fla_mask == 0xffff) ? -ENODEV : 0;
}
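
/*
 * Worked example for the fla matching in s390_process_res_acc_sch()
 * (values illustrative): for a link-address event, fla_mask is 0xff00,
 * so fla == 0xf000 matches a subchannel whose ssd fla is 0xf0a1, since
 * 0xf0a1 & 0xff00 == 0xf000.  With a full link address fla_mask is
 * 0xffff and only an exact match counts, which is why the function above
 * returns -ENODEV in that case: the non-zero return stops
 * for_each_subchannel() early, as at most one subchannel can match.
 */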

static int
s390_process_res_acc(struct res_acc_data *res_data)
{
	int rc;
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x", res_data->chp->id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (res_data->fla != 0) {
		sprintf(dbf_txt, "fla%x", res_data->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	}

	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * we will have to do.
	 */
	rc = for_each_subchannel(__s390_process_res_acc, res_data);
	if (css_slow_subchannels_exist())
		rc = -EAGAIN;
	else if (rc != -EAGAIN)
		rc = 0;
	return rc;
}

static int
__get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} *lir;

	lir = (struct lir *) data;
	if (!(lir->iq & 0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0] & 0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0] & 0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0] & 0x000000ff);
}

int
chsc_process_crw(void)
{
	int chpid, ret;
	struct res_acc_data res_data;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u8  flags;
		u8  vf;		/* validity flags */
		u8  rs;		/* reporting source */
		u8  cc;		/* content code */
		u16 fla;	/* full link address */
		u16 rsid;	/* reporting source id */
		u32 reserved5;
		u32 reserved6;
		u32 ccdf[96];	/* content-code dependent field */
		/* ccdf has to be big enough for a link-incident record */
	} *sei_area;

	if (!sei_page)
		return 0;
	/*
	 * build the chsc request block for store event information
	 * and do the call
	 * This function is only called by the machine check handler thread,
	 * so we don't need locking for the sei_page.
	 */
	sei_area = sei_page;

	CIO_TRACE_EVENT(2, "prcss");
	ret = 0;
	do {
		int ccode, status;
		struct device *dev;
		memset(sei_area, 0, sizeof(*sei_area));
		memset(&res_data, 0, sizeof(struct res_acc_data));
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;

		ccode = chsc(sei_area);
		if (ccode > 0)
			return 0;

		switch (sei_area->response.code) {
			/* for debug purposes, check for problems */
		case 0x0001:
			CIO_CRW_EVENT(4, "chsc_process_crw: event information "
				      "successfully stored\n");
			break; /* everything ok */
		case 0x0002:
			CIO_CRW_EVENT(2,
				      "chsc_process_crw: invalid command!\n");
			return 0;
		case 0x0003:
			CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc "
				      "request block!\n");
			return 0;
		case 0x0005:
			CIO_CRW_EVENT(2, "chsc_process_crw: no event "
				      "information stored\n");
			return 0;
		default:
			CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n",
				      sei_area->response.code);
			return 0;
		}
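
		/*
		 * Meaning of the flag bits tested below: 0x40 says the
		 * hardware overflowed its event queue, i.e. information was
		 * lost; 0x80 says more event information is pending, which
		 * is why the enclosing loop keeps calling store event
		 * information until that bit drops.
		 */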

		/* Check if we might have lost some information. */
		if (sei_area->flags & 0x40)
			CIO_CRW_EVENT(2, "chsc_process_crw: Event information "
				      "has been lost due to overflow!\n");

		if (sei_area->rs != 4) {
			CIO_CRW_EVENT(2, "chsc_process_crw: reporting source "
				      "(%04X) isn't a chpid!\n",
				      sei_area->rsid);
			continue;
		}

		/* which kind of information was stored? */
		switch (sei_area->cc) {
		case 1: /* link incident*/
			CIO_CRW_EVENT(4, "chsc_process_crw: "
				      "channel subsystem reports link incident,"
				      " reporting source is chpid %x\n",
				      sei_area->rsid);
			chpid = __get_chpid_from_lir(sei_area->ccdf);
			if (chpid < 0)
				CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n",
					      __FUNCTION__);
			else
				s390_set_chpid_offline(chpid);
			break;

		case 2: /* i/o resource accessibility */
			CIO_CRW_EVENT(4, "chsc_process_crw: "
				      "channel subsystem reports some I/O "
				      "devices may have become accessible\n");
			pr_debug("Data received after sei: \n");
			pr_debug("Validity flags: %x\n", sei_area->vf);

			/* allocate a new channel path structure, if needed */
			status = get_chp_status(sei_area->rsid);
			if (status < 0)
				new_channel_path(sei_area->rsid);
			else if (!status)
				break;
			dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
			res_data.chp = to_channelpath(dev);
			pr_debug("chpid: %x", sei_area->rsid);
			if ((sei_area->vf & 0xc0) != 0) {
				res_data.fla = sei_area->fla;
				if ((sei_area->vf & 0xc0) == 0xc0) {
					pr_debug(" full link addr: %x",
						 sei_area->fla);
					res_data.fla_mask = 0xffff;
				} else {
					pr_debug(" link addr: %x",
						 sei_area->fla);
					res_data.fla_mask = 0xff00;
				}
			}
			ret = s390_process_res_acc(&res_data);
			pr_debug("\n\n");
			put_device(dev);
			break;

		default: /* other stuff */
			CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n",
				      sei_area->cc);
			break;
		}
	} while (sei_area->flags & 0x80);
	return ret;
}

static inline int
__chp_add_new_sch(struct subchannel_id schid)
{
	struct schib schib;
	int ret;

	if (stsch(schid, &schib))
		/* We're through */
		return need_rescan ? -EAGAIN : -ENXIO;

	/* Put it on the slow path. */
	ret = css_enqueue_subchannel_slow(schid);
	if (ret) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
		return -EAGAIN;
	}
	return 0;
}
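
/*
 * __chp_add_new_sch() above shows the recovery pattern used throughout
 * this file: subchannels that cannot be handled right away are queued on
 * a slow list that slow_path_wq works off later; if even the queueing
 * fails, the list is cleared and need_rescan forces a full rescan
 * instead.
 */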

static int
__chp_add(struct subchannel_id schid, void *data)
{
	int i;
	struct channel_path *chp;
	struct subchannel *sch;

	chp = (struct channel_path *)data;
	sch = get_subchannel_by_schid(schid);
	if (!sch)
		/* Check if the subchannel is now available. */
		return __chp_add_new_sch(schid);
	spin_lock_irq(&sch->lock);
	for (i = 0; i < 8; i++)
		if (sch->schib.pmcw.chpid[i] == chp->id) {
			if (stsch(sch->schid, &sch->schib) != 0) {
				/* Endgame. */
				spin_unlock_irq(&sch->lock);
				return -ENXIO;
			}
			break;
		}
	if (i == 8) {
		spin_unlock_irq(&sch->lock);
		return 0;
	}
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | 0x80 >> i) & sch->opm;

	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);

	spin_unlock_irq(&sch->lock);
	put_device(&sch->dev);
	return 0;
}

static int
chp_add(int chpid)
{
	int rc;
	char dbf_txt[15];
	struct device *dev;

	if (!get_chp_status(chpid))
		return 0; /* no need to do the rest */

	sprintf(dbf_txt, "cadd%x", chpid);
	CIO_TRACE_EVENT(2, dbf_txt);

	dev = get_device(&css[0]->chps[chpid]->dev);
	rc = for_each_subchannel(__chp_add, to_channelpath(dev));
	if (css_slow_subchannels_exist())
		rc = -EAGAIN;
	if (rc != -EAGAIN)
		rc = 0;
	put_device(dev);
	return rc;
}

/*
 * Handling of crw machine checks with channel path source.
 */
int
chp_process_crw(int chpid, int on)
{
	if (on == 0) {
		/* Path has gone. We use the link incident routine.*/
		s390_set_chpid_offline(chpid);
		return 0; /* De-register is async anyway. */
	}
	/*
	 * Path has come. Allocate a new channel path structure,
	 * if needed.
	 */
	if (get_chp_status(chpid) < 0)
		new_channel_path(chpid);
	/* Avoid the extra overhead in process_res_acc. */
	return chp_add(chpid);
}

static inline int
__check_for_io_and_kill(struct subchannel *sch, int index)
{
	int cc;

	if (!device_is_online(sch))
		/* cio could be doing I/O. */
		return 0;
	cc = stsch(sch->schid, &sch->schib);
	if (cc)
		return 0;
	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) {
		device_set_waiting(sch);
		return 1;
	}
	return 0;
}
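
/*
 * In the helper above, a device that is online with I/O active on
 * exactly the path being varied off is put into a waiting state via
 * device_set_waiting(), giving the I/O a grace period before it is
 * killed; the caller below skips the slow-path requeue for such
 * subchannels.
 */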

static inline void
__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
{
	int chp, old_lpm;
	unsigned long flags;

	if (!sch->ssd_info.valid)
		return;

	spin_lock_irqsave(&sch->lock, flags);
	old_lpm = sch->lpm;
	for (chp = 0; chp < 8; chp++) {
		if (sch->ssd_info.chpid[chp] != chpid)
			continue;

		if (on) {
			sch->opm |= (0x80 >> chp);
			sch->lpm |= (0x80 >> chp);
			if (!old_lpm)
				device_trigger_reprobe(sch);
			else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
		} else {
			sch->opm &= ~(0x80 >> chp);
			sch->lpm &= ~(0x80 >> chp);
			/*
			 * Give running I/O a grace period in which it
			 * can successfully terminate, even using the
			 * just varied off path. Then kill it.
			 */
			if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) {
				if (css_enqueue_subchannel_slow(sch->schid)) {
					css_clear_subchannel_slow_list();
					need_rescan = 1;
				}
			} else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
		}
		break;
	}
	spin_unlock_irqrestore(&sch->lock, flags);
}

static int
s390_subchannel_vary_chpid_off(struct device *dev, void *data)
{
	struct subchannel *sch;
	__u8 *chpid;

	sch = to_subchannel(dev);
	chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int
s390_subchannel_vary_chpid_on(struct device *dev, void *data)
{
	struct subchannel *sch;
	__u8 *chpid;

	sch = to_subchannel(dev);
	chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;
	struct subchannel *sch;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		put_device(&sch->dev);
		return 0;
	}
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	if (css_enqueue_subchannel_slow(schid)) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
		return -EAGAIN;
	}
	return 0;
}

/*
 * Function: s390_vary_chpid
 * Varies the specified chpid online or offline
 */
static int
s390_vary_chpid(__u8 chpid, int on)
{
	char dbf_text[15];
	int status;

	sprintf(dbf_text, on ? "varyon%x" : "varyoff%x", chpid);
	CIO_TRACE_EVENT(2, dbf_text);

	status = get_chp_status(chpid);
	if (status < 0) {
		printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);
		return -EINVAL;
	}

	if (!on && !status) {
		printk(KERN_ERR "chpid %x is already offline\n", chpid);
		return -EINVAL;
	}

	set_chp_logically_online(chpid, on);

	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */

	bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
			 s390_subchannel_vary_chpid_on :
			 s390_subchannel_vary_chpid_off);
	if (on)
		/* Scan for new devices on varied on path. */
		for_each_subchannel(__s390_vary_chpid_on, NULL);
	if (need_rescan || css_slow_subchannels_exist())
		queue_work(slow_path_wq, &slow_path_work);
	return 0;
}
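
/*
 * Example use of the vary interface from user space, through the status
 * attribute defined further down (the chpid and the css0 device path are
 * assumptions for the sake of the example):
 *
 *	echo off > /sys/devices/css0/chp0.4f/status
 *	echo on  > /sys/devices/css0/chp0.4f/status
 */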

/*
 * Channel measurement related functions
 */
static ssize_t
chp_measurement_chars_read(struct kobject *kobj, char *buf, loff_t off,
			   size_t count)
{
	struct channel_path *chp;
	unsigned int size;

	chp = to_channelpath(container_of(kobj, struct device, kobj));
	if (!chp->cmg_chars)
		return 0;

	size = sizeof(struct cmg_chars);

	if (off > size)
		return 0;
	if (off + count > size)
		count = size - off;
	memcpy(buf, chp->cmg_chars + off, count);
	return count;
}

static struct bin_attribute chp_measurement_chars_attr = {
	.attr = {
		.name = "measurement_chars",
		.mode = S_IRUSR,
		.owner = THIS_MODULE,
	},
	.size = sizeof(struct cmg_chars),
	.read = chp_measurement_chars_read,
};

static void
chp_measurement_copy_block(struct cmg_entry *buf,
			   struct channel_subsystem *css, int chpid)
{
	void *area;
	struct cmg_entry *entry, reference_buf;
	int idx;

	if (chpid < 128) {
		area = css->cub_addr1;
		idx = chpid;
	} else {
		area = css->cub_addr2;
		idx = chpid - 128;
	}
	entry = area + (idx * sizeof(struct cmg_entry));
	do {
		memcpy(buf, entry, sizeof(*entry));
		memcpy(&reference_buf, entry, sizeof(*entry));
	} while (reference_buf.values[0] != buf->values[0]);
}
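
/*
 * The copy loop in chp_measurement_copy_block() above is a lock-free
 * read of a block that the channel subsystem updates concurrently: the
 * entry is copied twice, and the read is retried until the first word of
 * both copies matches, i.e. until no hardware update raced with it.
 */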

static ssize_t
chp_measurement_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
{
	struct channel_path *chp;
	struct channel_subsystem *css;
	unsigned int size;

	chp = to_channelpath(container_of(kobj, struct device, kobj));
	css = to_css(chp->dev.parent);

	size = sizeof(struct cmg_entry);

	/* Only allow single reads. */
	if (off || count < size)
		return 0;
	chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->id);
	count = size;
	return count;
}

static struct bin_attribute chp_measurement_attr = {
	.attr = {
		.name = "measurement",
		.mode = S_IRUSR,
		.owner = THIS_MODULE,
	},
	.size = sizeof(struct cmg_entry),
	.read = chp_measurement_read,
};

static void
chsc_remove_chp_cmg_attr(struct channel_path *chp)
{
	sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_chars_attr);
	sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_attr);
}

static int
chsc_add_chp_cmg_attr(struct channel_path *chp)
{
	int ret;

	ret = sysfs_create_bin_file(&chp->dev.kobj,
				    &chp_measurement_chars_attr);
	if (ret)
		return ret;
	ret = sysfs_create_bin_file(&chp->dev.kobj, &chp_measurement_attr);
	if (ret)
		sysfs_remove_bin_file(&chp->dev.kobj,
				      &chp_measurement_chars_attr);
	return ret;
}

static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chsc_remove_chp_cmg_attr(css->chps[i]);
	}
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chsc_add_chp_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chsc_remove_chp_cmg_attr(css->chps[i]);
	}
	return ret;
}
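
/*
 * secm (set channel measurement) below hands the hardware two
 * measurement areas: cub_addr1 receives the entries for chpids 0x00-0x7f
 * and cub_addr2 those for 0x80-0xff, matching the split in
 * chp_measurement_copy_block() above.
 */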

static int
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} *secm_area;
	int ret, ccode;

	secm_area = page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	switch (secm_area->response.code) {
	case 0x0001: /* Success. */
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide secm\n");
		ret = -EOPNOTSUPP;
		break;
	case 0x0102: /* cub addresses incorrect */
		CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n");
		ret = -EINVAL;
		break;
	case 0x0103: /* key error */
		CIO_CRW_EVENT(2, "Access key error in secm\n");
		ret = -EINVAL;
		break;
	case 0x0105: /* error while starting */
		CIO_CRW_EVENT(2, "Error while starting channel measurement\n");
		ret = -EIO;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      secm_area->response.code);
		ret = -EIO;
	}
	return ret;
}

int
chsc_secm(struct channel_subsystem *css, int enable)
{
	void *secm_area;
	int ret;

	secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!secm_area)
		return -ENOMEM;

	mutex_lock(&css->mutex);
	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			free_page((unsigned long)secm_area);
			mutex_unlock(&css->mutex);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable, secm_area);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				memset(secm_area, 0, PAGE_SIZE);
				__chsc_do_secm(css, 0, secm_area);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (enable && !css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	mutex_unlock(&css->mutex);
	free_page((unsigned long)secm_area);
	return ret;
}

/*
 * Files for the channel path entries.
 */
static ssize_t
chp_status_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = container_of(dev, struct channel_path, dev);

	if (!chp)
		return 0;
	return (get_chp_status(chp->id) ? sprintf(buf, "online\n") :
		sprintf(buf, "offline\n"));
}

static ssize_t
chp_status_write(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct channel_path *cp = container_of(dev, struct channel_path, dev);
	char cmd[10];
	int num_args;
	int error;

	num_args = sscanf(buf, "%5s", cmd);
	if (!num_args)
		return count;

	if (!strnicmp(cmd, "on", 2))
		error = s390_vary_chpid(cp->id, 1);
	else if (!strnicmp(cmd, "off", 3))
		error = s390_vary_chpid(cp->id, 0);
	else
		error = -EINVAL;

	return error < 0 ? error : count;
}
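
/*
 * DEVICE_ATTR(status, 0644, ...) below wires the two handlers above to a
 * world-readable, root-writable "status" file on each channel-path
 * device; the attributes that follow are read-only views of the data
 * gathered in new_channel_path().
 */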

static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);

static ssize_t
chp_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = container_of(dev, struct channel_path, dev);

	if (!chp)
		return 0;
	return sprintf(buf, "%x\n", chp->desc.desc);
}

static DEVICE_ATTR(type, 0444, chp_type_show, NULL);

static ssize_t
chp_cmg_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = to_channelpath(dev);

	if (!chp)
		return 0;
	if (chp->cmg == -1) /* channel measurements not available */
		return sprintf(buf, "unknown\n");
	return sprintf(buf, "%x\n", chp->cmg);
}

static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);

static ssize_t
chp_shared_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = to_channelpath(dev);

	if (!chp)
		return 0;
	if (chp->shared == -1) /* channel measurements not available */
		return sprintf(buf, "unknown\n");
	return sprintf(buf, "%x\n", chp->shared);
}

static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);

static struct attribute *chp_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_type.attr,
	&dev_attr_cmg.attr,
	&dev_attr_shared.attr,
	NULL,
};

static struct attribute_group chp_attr_group = {
	.attrs = chp_attrs,
};

static void
chp_release(struct device *dev)
{
	struct channel_path *cp;

	cp = container_of(dev, struct channel_path, dev);
	kfree(cp);
}
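
/*
 * The scpd and scmc request blocks below address channel paths as an
 * inclusive range; both callers in this file ask for a single chpid by
 * setting first_chpid == last_chpid.
 */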

static int
chsc_determine_channel_path_description(int chpid,
					struct channel_path_desc *desc)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		struct channel_path_desc desc;
	} *scpd_area;

	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;

	scpd_area->first_chpid = chpid;
	scpd_area->last_chpid = chpid;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (scpd_area->response.code) {
	case 0x0001: /* Success. */
		memcpy(desc, &scpd_area->desc,
		       sizeof(struct channel_path_desc));
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide scpd\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scpd_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scpd_area);
	return ret;
}

static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
{
	switch (chp->cmg) {
	case 2:
	case 3:
		chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
					 GFP_KERNEL);
		if (chp->cmg_chars) {
			int i, mask;
			struct cmg_chars *cmg_chars;

			cmg_chars = chp->cmg_chars;
			for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
				mask = 0x80 >> (i + 3);
				if (cmcv & mask)
					cmg_chars->values[i] = chars->values[i];
				else
					cmg_chars->values[i] = 0;
			}
		}
		break;
	default:
		/* No cmg-dependent data. */
		break;
	}
}

static int
chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} *scmc_area;

	scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scmc_area)
		return -ENOMEM;

	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;

	scmc_area->first_chpid = chp->id;
	scmc_area->last_chpid = chp->id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (scmc_area->response.code) {
	case 0x0001: /* Success. */
		if (!scmc_area->not_valid) {
			chp->cmg = scmc_area->cmg;
			chp->shared = scmc_area->shared;
			chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
						  (struct cmg_chars *)
						  &scmc_area->data);
		} else {
			chp->cmg = -1;
			chp->shared = -1;
		}
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Invalid bit combination. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided. */
		CIO_CRW_EVENT(2, "Model does not provide scmc\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scmc_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scmc_area);
	return ret;
}
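
/*
 * Sketch of the sysfs view that new_channel_path() below creates for a
 * channel path, here for a hypothetical chpid 0x4f:
 *
 *	/sys/devices/css0/chp0.4f/status	("online"/"offline", writable)
 *	/sys/devices/css0/chp0.4f/type
 *	/sys/devices/css0/chp0.4f/cmg
 *	/sys/devices/css0/chp0.4f/shared
 *	/sys/devices/css0/chp0.4f/measurement	(plus measurement_chars,
 *						 if measurements are active)
 */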

/*
 * Entries for chpids on the system bus.
 * This replaces /proc/chpids.
 */
static int
new_channel_path(int chpid)
{
	struct channel_path *chp;
	int ret;

	chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
	if (!chp)
		return -ENOMEM;

	/* fill in status, etc. */
	chp->id = chpid;
	chp->state = 1;
	chp->dev = (struct device) {
		.parent  = &css[0]->device,
		.release = chp_release,
	};
	snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);

	/* Obtain channel path description and fill it in. */
	ret = chsc_determine_channel_path_description(chpid, &chp->desc);
	if (ret)
		goto out_free;
	/* Get channel-measurement characteristics. */
	if (css_characteristics_avail && css_chsc_characteristics.scmc
	    && css_chsc_characteristics.secm) {
		ret = chsc_get_channel_measurement_chars(chp);
		if (ret)
			goto out_free;
	} else {
		static int msg_done;

		if (!msg_done) {
			printk(KERN_WARNING "cio: Channel measurements not "
			       "available, continuing.\n");
			msg_done = 1;
		}
		chp->cmg = -1;
	}

	/* make it known to the system */
	ret = device_register(&chp->dev);
	if (ret) {
		printk(KERN_WARNING "%s: could not register %02x\n",
		       __func__, chpid);
		goto out_free;
	}
	ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
	if (ret) {
		device_unregister(&chp->dev);
		goto out_free;
	}
	mutex_lock(&css[0]->mutex);
	if (css[0]->cm_enabled) {
		ret = chsc_add_chp_cmg_attr(chp);
		if (ret) {
			sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
			device_unregister(&chp->dev);
			mutex_unlock(&css[0]->mutex);
			goto out_free;
		}
	}
	css[0]->chps[chpid] = chp;
	mutex_unlock(&css[0]->mutex);
	return ret;
out_free:
	kfree(chp);
	return ret;
}

void *
chsc_get_chp_desc(struct subchannel *sch, int chp_no)
{
	struct channel_path *chp;
	struct channel_path_desc *desc;

	chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]];
	if (!chp)
		return NULL;
	desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
	if (!desc)
		return NULL;
	memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
	return desc;
}

static int __init
chsc_alloc_sei_area(void)
{
	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page)
		printk(KERN_WARNING "Can't allocate page for processing of "
		       "chsc machine checks!\n");
	return (sei_page ? 0 : -ENOMEM);
}
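
/*
 * chsc_enable_facility() below switches on an optional CHSC facility at
 * boot; the operation code selects the facility.  Illustrative call site
 * (the operation-code constant is an assumption, not defined in this
 * file):
 *
 *	if (chsc_enable_facility(CHSC_SDA_OC_MSS))
 *		printk(KERN_INFO "running without multiple subchannel "
 *		       "sets\n");
 */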

int __init
chsc_enable_facility(int operation_code)
{
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} *sda_area;

	sda_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sda_area)
		return -ENOMEM;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	switch (sda_area->response.code) {
	case 0x0001: /* everything ok */
		ret = 0;
		break;
	case 0x0003: /* invalid request block */
	case 0x0007:
		ret = -EINVAL;
		break;
	case 0x0004: /* command not provided */
	case 0x0101: /* facility not provided */
		ret = -EOPNOTSUPP;
		break;
	default: /* something went wrong */
		ret = -EIO;
	}
 out:
	free_page((unsigned long)sda_area);
	return ret;
}

subsys_initcall(chsc_alloc_sei_area);

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init
chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[518];
	} *scsc_area;

	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scsc_area) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs due to no memory.\n");
		return -ENOMEM;
	}

	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs, cc=%i.\n", result);
		result = -EIO;
		goto exit;
	}

	if (scsc_area->response.code != 1) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs.\n");
		result = -EIO;
		goto exit;
	}
	memcpy(&css_general_characteristics, scsc_area->general_char,
	       sizeof(css_general_characteristics));
	memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
	       sizeof(css_chsc_characteristics));
exit:
	free_page((unsigned long)scsc_area);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);