Lines Matching refs:hp
Each match below is a line from the hvsi (Hypervisor Virtual Serial Interface) tty/console driver that references the per-port state pointer hp (struct hvsi_struct *); the leading number is that line's position in the source file.

101 static int (*hvsi_wait)(struct hvsi_struct *hp, int state);
113 static inline int is_console(struct hvsi_struct *hp)
115 return hp->flags & HVSI_CONSOLE;
118 static inline int is_open(struct hvsi_struct *hp)
121 return (hp->state == HVSI_OPEN)
122 || (hp->state == HVSI_WAIT_FOR_MCTRL_RESPONSE);
125 static inline void print_state(struct hvsi_struct *hp)
136 const char *name = (hp->state < ARRAY_SIZE(state_names))
137 ? state_names[hp->state] : "UNKNOWN";
139 pr_debug("hvsi%i: state = %s\n", hp->index, name);
143 static inline void __set_state(struct hvsi_struct *hp, int state)
145 hp->state = state;
146 print_state(hp);
147 wake_up_all(&hp->stateq);
150 static inline void set_state(struct hvsi_struct *hp, int state)
154 spin_lock_irqsave(&hp->lock, flags);
155 __set_state(hp, state);
156 spin_unlock_irqrestore(&hp->lock, flags);
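
The fragments at 143-156 are essentially the whole of the driver's state-machine plumbing; the refs filter only hides the brace and declaration lines. A sketch of the two helpers as they fit together (the unsigned long flags local and the function braces are assumed, everything else is shown above):

static inline void __set_state(struct hvsi_struct *hp, int state)
{
        hp->state = state;              /* caller already holds hp->lock */
        print_state(hp);
        wake_up_all(&hp->stateq);       /* unblock wait_for_state() callers */
}

static inline void set_state(struct hvsi_struct *hp, int state)
{
        unsigned long flags;

        spin_lock_irqsave(&hp->lock, flags);
        __set_state(hp, state);
        spin_unlock_irqrestore(&hp->lock, flags);
}
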
170 static inline int got_packet(const struct hvsi_struct *hp, uint8_t *packet)
172 if (hp->inbuf_end < packet + sizeof(struct hvsi_header))
175 if (hp->inbuf_end < (packet + len_packet(packet)))
182 static void compact_inbuf(struct hvsi_struct *hp, uint8_t *read_to)
184 int remaining = (int)(hp->inbuf_end - read_to);
188 if (read_to != hp->inbuf)
189 memmove(hp->inbuf, read_to, remaining);
191 hp->inbuf_end = hp->inbuf + remaining;
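
Lines 170-191 cover the receive-buffer bookkeeping: got_packet() decides whether a complete packet starts at 'packet', and compact_inbuf() slides any unconsumed tail back to the start of hp->inbuf so the next read can append after it. A sketch with the return statements and debug print filled in (those filled-in lines are assumptions; len_packet() is the length taken from the packet header):

static inline int got_packet(const struct hvsi_struct *hp, uint8_t *packet)
{
        if (hp->inbuf_end < packet + sizeof(struct hvsi_header))
                return 0;       /* not even a whole header yet */

        if (hp->inbuf_end < (packet + len_packet(packet)))
                return 0;       /* header present, body still incomplete */

        return 1;
}

static void compact_inbuf(struct hvsi_struct *hp, uint8_t *read_to)
{
        int remaining = (int)(hp->inbuf_end - read_to);

        pr_debug("%s: %i chars remain\n", __func__, remaining);

        if (read_to != hp->inbuf)
                memmove(hp->inbuf, read_to, remaining);

        hp->inbuf_end = hp->inbuf + remaining;
}
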
230 static int hvsi_read(struct hvsi_struct *hp, char *buf, int count)
234 got = hvc_get_chars(hp->vtermno, buf, count);
239 static void hvsi_recv_control(struct hvsi_struct *hp, uint8_t *packet,
248 pr_debug("hvsi%i: CD dropped\n", hp->index);
249 hp->mctrl &= TIOCM_CD;
250 /* If userland hasn't done an open(2) yet, hp->tty is NULL. */
251 if (hp->tty && !(hp->tty->flags & CLOCAL))
252 *to_hangup = hp->tty;
256 pr_debug("hvsi%i: service processor came back\n", hp->index);
257 if (hp->state != HVSI_CLOSED) {
258 *to_handshake = hp;
263 hp->index);
269 static void hvsi_recv_response(struct hvsi_struct *hp, uint8_t *packet)
273 switch (hp->state) {
275 __set_state(hp, HVSI_WAIT_FOR_VER_QUERY);
278 hp->mctrl = 0;
280 hp->mctrl |= TIOCM_DTR;
282 hp->mctrl |= TIOCM_CD;
283 __set_state(hp, HVSI_OPEN);
286 printk(KERN_ERR "hvsi%i: unexpected query response: ", hp->index);
293 static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
300 packet.hdr.seqno = atomic_inc_return(&hp->seqno);
308 wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
311 hp->index);
318 static void hvsi_recv_query(struct hvsi_struct *hp, uint8_t *packet)
322 switch (hp->state) {
324 hvsi_version_respond(hp, query->hdr.seqno);
325 __set_state(hp, HVSI_OPEN);
328 printk(KERN_ERR "hvsi%i: unexpected query: ", hp->index);
334 static void hvsi_insert_chars(struct hvsi_struct *hp, const char *buf, int len)
342 hp->sysrq = 1;
344 } else if (hp->sysrq) {
346 hp->sysrq = 0;
350 tty_insert_flip_char(hp->tty, c, 0);
363 static struct tty_struct *hvsi_recv_data(struct hvsi_struct *hp,
381 hvsi_insert_chars(hp, data, datalen);
389 memcpy(hp->throttle_buf, data + TTY_THRESHOLD_THROTTLE, overflow);
390 hp->n_throttle = overflow;
393 return hp->tty;
402 static int hvsi_load_chunk(struct hvsi_struct *hp, struct tty_struct **flip,
405 uint8_t *packet = hp->inbuf;
412 chunklen = hvsi_read(hp, hp->inbuf_end, HVSI_MAX_READ);
419 dbg_dump_hex(hp->inbuf_end, chunklen);
421 hp->inbuf_end += chunklen;
424 while ((packet < hp->inbuf_end) && got_packet(hp, packet)) {
428 printk(KERN_ERR "hvsi%i: got malformed packet\n", hp->index);
430 while ((packet < hp->inbuf_end) && (!is_header(packet)))
441 if (!is_open(hp))
443 if (hp->tty == NULL)
445 *flip = hvsi_recv_data(hp, packet);
448 hvsi_recv_control(hp, packet, hangup, handshake);
451 hvsi_recv_response(hp, packet);
454 hvsi_recv_query(hp, packet);
458 hp->index, header->type);
476 compact_inbuf(hp, packet);
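
Lines 402-476 are the core receive path, hvsi_load_chunk(): pull one chunk from the hypervisor, walk every complete packet now in hp->inbuf, dispatch by packet type, and compact the buffer around any trailing partial packet. A sketch of that flow; the VS_*_PACKET_HEADER constants, is_header(), and the control-flow details between the visible lines are assumptions inferred from the calls shown above:

static int hvsi_load_chunk(struct hvsi_struct *hp, struct tty_struct **flip,
                struct tty_struct **hangup, struct hvsi_struct **handshake)
{
        uint8_t *packet = hp->inbuf;
        int chunklen;

        *flip = NULL;
        *hangup = NULL;
        *handshake = NULL;

        chunklen = hvsi_read(hp, hp->inbuf_end, HVSI_MAX_READ);
        if (chunklen == 0)
                return 0;
        hp->inbuf_end += chunklen;

        /* handle every complete packet now sitting in the buffer */
        while ((packet < hp->inbuf_end) && got_packet(hp, packet)) {
                struct hvsi_header *header = (struct hvsi_header *)packet;

                if (!is_header(packet)) {
                        printk(KERN_ERR "hvsi%i: got malformed packet\n",
                                hp->index);
                        /* resync: skip until something that looks like a header */
                        while ((packet < hp->inbuf_end) && (!is_header(packet)))
                                packet++;
                        continue;
                }

                switch (header->type) {
                case VS_DATA_PACKET_HEADER:
                        if (is_open(hp) && hp->tty)
                                *flip = hvsi_recv_data(hp, packet);
                        break;
                case VS_CONTROL_PACKET_HEADER:
                        hvsi_recv_control(hp, packet, hangup, handshake);
                        break;
                case VS_QUERY_RESPONSE_PACKET_HEADER:
                        hvsi_recv_response(hp, packet);
                        break;
                case VS_QUERY_PACKET_HEADER:
                        hvsi_recv_query(hp, packet);
                        break;
                default:
                        printk(KERN_ERR "hvsi%i: unknown packet type %i\n",
                                hp->index, header->type);
                        break;
                }
                packet += len_packet(packet);
        }

        compact_inbuf(hp, packet);      /* keep any trailing partial packet */
        return 1;
}
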
481 static void hvsi_send_overflow(struct hvsi_struct *hp)
484 hp->n_throttle);
486 hvsi_insert_chars(hp, hp->throttle_buf, hp->n_throttle);
487 hp->n_throttle = 0;
496 struct hvsi_struct *hp = (struct hvsi_struct *)arg;
506 spin_lock_irqsave(&hp->lock, flags);
507 again = hvsi_load_chunk(hp, &flip, &hangup, &handshake);
508 spin_unlock_irqrestore(&hp->lock, flags);
532 spin_lock_irqsave(&hp->lock, flags);
533 if (hp->tty && hp->n_throttle
534 && (!test_bit(TTY_THROTTLED, &hp->tty->flags))) {
537 flip = hp->tty;
538 hvsi_send_overflow(hp);
540 spin_unlock_irqrestore(&hp->lock, flags);
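
Lines 496-540 show the interrupt handler. It keeps calling hvsi_load_chunk() under the port lock until the hypervisor queue is empty, does the tty-level work (flip push, hangup, deferred re-handshake) outside the lock, and finally replays throttled overflow if the line has been unthrottled in the meantime. A sketch; the loop control, the tty_hangup()/schedule_work() follow-ups and the IRQ_HANDLED return are assumptions inferred from the visible lines:

static irqreturn_t hvsi_interrupt(int virq, void *arg)
{
        struct hvsi_struct *hp = (struct hvsi_struct *)arg;
        struct tty_struct *flip = NULL;
        struct tty_struct *hangup = NULL;
        struct hvsi_struct *handshake = NULL;
        unsigned long flags;
        int again = 1;

        while (again) {
                spin_lock_irqsave(&hp->lock, flags);
                again = hvsi_load_chunk(hp, &flip, &hangup, &handshake);
                spin_unlock_irqrestore(&hp->lock, flags);

                /* flip push and hangup must happen outside hp->lock */
                if (flip) {
                        tty_flip_buffer_push(flip);
                        flip = NULL;
                }
                if (hangup)
                        tty_hangup(hangup);
                if (handshake)
                        schedule_work(&handshake->handshaker);
        }

        /* unthrottled while data was parked in throttle_buf? deliver it now */
        spin_lock_irqsave(&hp->lock, flags);
        if (hp->tty && hp->n_throttle
                        && (!test_bit(TTY_THROTTLED, &hp->tty->flags))) {
                flip = hp->tty;
                hvsi_send_overflow(hp);
        }
        spin_unlock_irqrestore(&hp->lock, flags);

        if (flip)
                tty_flip_buffer_push(flip);

        return IRQ_HANDLED;
}
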
550 static int __init poll_for_state(struct hvsi_struct *hp, int state)
555 hvsi_interrupt(hp->virq, (void *)hp); /* get pending data */
557 if (hp->state == state)
567 static int wait_for_state(struct hvsi_struct *hp, int state)
571 if (!wait_event_timeout(hp->stateq, (hp->state == state), HVSI_TIMEOUT))
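
Lines 101 and 550-571 are the two implementations behind the hvsi_wait function pointer: at early-console time there are no interrupts yet, so poll_for_state() drives the handler by hand, while after request_irq() the driver can simply sleep on hp->stateq until __set_state() wakes it. A sketch; the timeout loop inside poll_for_state() is an assumption beyond the single line shown above:

static int __init poll_for_state(struct hvsi_struct *hp, int state)
{
        unsigned long end_jiffies = jiffies + HVSI_TIMEOUT;

        for (;;) {
                hvsi_interrupt(hp->virq, (void *)hp); /* get pending data */

                if (hp->state == state)
                        return 0;

                mdelay(5);
                if (time_after(jiffies, end_jiffies))
                        return -EIO;
        }
}

static int wait_for_state(struct hvsi_struct *hp, int state)
{
        int ret = 0;

        if (!wait_event_timeout(hp->stateq, (hp->state == state), HVSI_TIMEOUT))
                ret = -EIO;

        return ret;
}
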
577 static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
584 packet.hdr.seqno = atomic_inc_return(&hp->seqno);
590 wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
592 printk(KERN_ERR "hvsi%i: couldn't send query (%i)!\n", hp->index,
600 static int hvsi_get_mctrl(struct hvsi_struct *hp)
604 set_state(hp, HVSI_WAIT_FOR_MCTRL_RESPONSE);
605 hvsi_query(hp, VSV_SEND_MODEM_CTL_STATUS);
607 ret = hvsi_wait(hp, HVSI_OPEN);
609 printk(KERN_ERR "hvsi%i: didn't get modem flags\n", hp->index);
610 set_state(hp, HVSI_OPEN);
614 pr_debug("%s: mctrl 0x%x\n", __func__, hp->mctrl);
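
Lines 577-614 show the query/response idiom used throughout the driver: move the state machine into a WAIT_FOR_* state, send a VSV_* query packet, then block in hvsi_wait() until the response handler (hvsi_recv_response(), lines 269-286) fills in hp->mctrl and flips the state back to HVSI_OPEN. A sketch of hvsi_get_mctrl(); the error-path structure is assumed:

static int hvsi_get_mctrl(struct hvsi_struct *hp)
{
        int ret;

        set_state(hp, HVSI_WAIT_FOR_MCTRL_RESPONSE);
        hvsi_query(hp, VSV_SEND_MODEM_CTL_STATUS);

        ret = hvsi_wait(hp, HVSI_OPEN); /* poll_for_state or wait_for_state */
        if (ret < 0) {
                printk(KERN_ERR "hvsi%i: didn't get modem flags\n", hp->index);
                set_state(hp, HVSI_OPEN);
                return ret;
        }

        pr_debug("%s: mctrl 0x%x\n", __func__, hp->mctrl);

        return 0;
}
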
620 static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
626 packet.hdr.seqno = atomic_inc_return(&hp->seqno);
637 wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
639 printk(KERN_ERR "hvsi%i: couldn't set DTR!\n", hp->index);
646 static void hvsi_drain_input(struct hvsi_struct *hp)
652 if (0 == hvsi_read(hp, buf, HVSI_MAX_READ))
656 static int hvsi_handshake(struct hvsi_struct *hp)
667 hvsi_drain_input(hp);
669 set_state(hp, HVSI_WAIT_FOR_VER_RESPONSE);
670 ret = hvsi_query(hp, VSV_SEND_VERSION_NUMBER);
672 printk(KERN_ERR "hvsi%i: couldn't send version query\n", hp->index);
676 ret = hvsi_wait(hp, HVSI_OPEN);
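
Lines 656-676 are the protocol handshake: flush whatever the service processor already queued, ask for its protocol version, and wait until the exchange lands the port in HVSI_OPEN. A sketch; the rationale comment and the exact error-path shape are assumptions around the lines shown above:

static int hvsi_handshake(struct hvsi_struct *hp)
{
        int ret;

        /* The FSP may have been reset mid-session; drop anything already
         * queued so a stale packet is not mistaken for the version
         * response we are about to ask for. */
        hvsi_drain_input(hp);

        set_state(hp, HVSI_WAIT_FOR_VER_RESPONSE);
        ret = hvsi_query(hp, VSV_SEND_VERSION_NUMBER);
        if (ret < 0) {
                printk(KERN_ERR "hvsi%i: couldn't send version query\n",
                        hp->index);
                return ret;
        }

        ret = hvsi_wait(hp, HVSI_OPEN);
        if (ret < 0)
                return ret;

        return 0;
}
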
685 struct hvsi_struct *hp =
688 if (hvsi_handshake(hp) >= 0)
691 printk(KERN_ERR "hvsi%i: re-handshaking failed\n", hp->index);
692 if (is_console(hp)) {
697 printk(KERN_ERR "hvsi%i: lost console!\n", hp->index);
701 static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
709 packet.hdr.seqno = atomic_inc_return(&hp->seqno);
713 ret = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
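
Lines 701-713 build an outgoing data packet. A sketch of hvsi_put_chars(); the struct hvsi_data layout, the VS_DATA_PACKET_HEADER type constant and the HVSI_MAX_OUTGOING_DATA limit are assumptions, only the seqno stamping and the hvc_put_chars() call appear in the listing:

static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
{
        struct hvsi_data packet;        /* hdr + payload; layout assumed */
        int ret;

        BUG_ON(count > HVSI_MAX_OUTGOING_DATA);

        packet.hdr.type = VS_DATA_PACKET_HEADER;
        packet.hdr.seqno = atomic_inc_return(&hp->seqno);
        packet.hdr.len = count + sizeof(struct hvsi_header);
        memcpy(&packet.data, buf, count);

        ret = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
        if (ret == packet.hdr.len)
                return count;   /* report payload bytes, not packet bytes */

        return ret;             /* 0 (hypervisor buffer full) or error */
}
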
721 static void hvsi_close_protocol(struct hvsi_struct *hp)
726 packet.hdr.seqno = atomic_inc_return(&hp->seqno);
733 hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
738 struct hvsi_struct *hp;
747 hp = &hvsi_ports[line];
749 tty->driver_data = hp;
752 if (hp->state == HVSI_FSP_DIED)
755 spin_lock_irqsave(&hp->lock, flags);
756 hp->tty = tty;
757 hp->count++;
758 atomic_set(&hp->seqno, 0);
759 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
760 spin_unlock_irqrestore(&hp->lock, flags);
762 if (is_console(hp))
765 ret = hvsi_handshake(hp);
771 ret = hvsi_get_mctrl(hp);
777 ret = hvsi_set_mctrl(hp, hp->mctrl | TIOCM_DTR);
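
Lines 738-777 are the open path: bind the tty to the port, enable its hypervisor interrupt, and for non-console ports run the handshake, fetch the modem lines and raise DTR. A sketch; the bounds check, the 'line' derivation and the exact error returns are assumptions around the listed lines:

static int hvsi_open(struct tty_struct *tty, struct file *filp)
{
        struct hvsi_struct *hp;
        unsigned long flags;
        int line = tty->index;          /* assumed source of 'line' */
        int ret;

        if (line < 0 || line >= hvsi_count)
                return -ENODEV;
        hp = &hvsi_ports[line];

        tty->driver_data = hp;

        if (hp->state == HVSI_FSP_DIED)
                return -EIO;

        spin_lock_irqsave(&hp->lock, flags);
        hp->tty = tty;
        hp->count++;
        atomic_set(&hp->seqno, 0);
        h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
        spin_unlock_irqrestore(&hp->lock, flags);

        if (is_console(hp))
                return 0;       /* console handshake already ran at setup */

        ret = hvsi_handshake(hp);
        if (ret < 0)
                return ret;

        ret = hvsi_get_mctrl(hp);
        if (ret < 0)
                return ret;

        ret = hvsi_set_mctrl(hp, hp->mctrl | TIOCM_DTR);
        if (ret < 0)
                return ret;

        return 0;
}
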
786 /* wait for hvsi_write_worker to empty hp->outbuf */
787 static void hvsi_flush_output(struct hvsi_struct *hp)
789 wait_event_timeout(hp->emptyq, (hp->n_outbuf <= 0), HVSI_TIMEOUT);
792 cancel_delayed_work_sync(&hp->writer);
793 flush_work_sync(&hp->handshaker);
799 hp->n_outbuf = 0;
804 struct hvsi_struct *hp = tty->driver_data;
812 spin_lock_irqsave(&hp->lock, flags);
814 if (--hp->count == 0) {
815 hp->tty = NULL;
816 hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
819 if (!is_console(hp)) {
820 h_vio_signal(hp->vtermno, VIO_IRQ_DISABLE); /* no more irqs */
821 __set_state(hp, HVSI_CLOSED);
828 spin_unlock_irqrestore(&hp->lock, flags);
831 synchronize_irq(hp->virq);
834 hvsi_flush_output(hp);
837 hvsi_close_protocol(hp);
843 hvsi_drain_input(hp);
845 spin_lock_irqsave(&hp->lock, flags);
847 } else if (hp->count < 0)
849 hp - hvsi_ports, hp->count);
851 spin_unlock_irqrestore(&hp->lock, flags);
856 struct hvsi_struct *hp = tty->driver_data;
861 spin_lock_irqsave(&hp->lock, flags);
863 hp->count = 0;
864 hp->n_outbuf = 0;
865 hp->tty = NULL;
867 spin_unlock_irqrestore(&hp->lock, flags);
870 /* called with hp->lock held */
871 static void hvsi_push(struct hvsi_struct *hp)
875 if (hp->n_outbuf <= 0)
878 n = hvsi_put_chars(hp, hp->outbuf, hp->n_outbuf);
882 hp->n_outbuf = 0;
884 __set_state(hp, HVSI_FSP_DIED);
885 printk(KERN_ERR "hvsi%i: service processor died\n", hp->index);
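
Lines 870-885 are hvsi_push(), called with hp->lock held (per the comment at line 870): hand the accumulated outbuf to the hypervisor in one hvsi_put_chars() call. A sketch; the -EIO test and the "buffer full" case are assumptions around the lines shown:

static void hvsi_push(struct hvsi_struct *hp)
{
        int n;

        if (hp->n_outbuf <= 0)
                return;

        n = hvsi_put_chars(hp, hp->outbuf, hp->n_outbuf);
        if (n > 0) {
                /* the hypervisor accepted everything we handed it */
                hp->n_outbuf = 0;
        } else if (n == -EIO) {
                __set_state(hp, HVSI_FSP_DIED);
                printk(KERN_ERR "hvsi%i: service processor died\n", hp->index);
        }
        /* n == 0: hypervisor buffer full; leave outbuf for the write worker */
}
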
892 struct hvsi_struct *hp =
902 spin_lock_irqsave(&hp->lock, flags);
904 pr_debug("%s: %i chars in buffer\n", __func__, hp->n_outbuf);
906 if (!is_open(hp)) {
913 schedule_delayed_work(&hp->writer, HZ);
917 hvsi_push(hp);
918 if (hp->n_outbuf > 0)
919 schedule_delayed_work(&hp->writer, 10);
926 wake_up_all(&hp->emptyq);
927 tty_wakeup(hp->tty);
931 spin_unlock_irqrestore(&hp->lock, flags);
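
Lines 892-931 are the delayed-work writer: anything hvsi_write() could not push immediately is retried here, ten jiffies at a time, and once the buffer drains the worker wakes writers blocked on emptyq and kicks the line discipline. A sketch; the container_of() expression and the goto/label shape are assumptions:

static void hvsi_write_worker(struct work_struct *work)
{
        struct hvsi_struct *hp =
                container_of(work, struct hvsi_struct, writer.work);
        unsigned long flags;

        spin_lock_irqsave(&hp->lock, flags);

        pr_debug("%s: %i chars in buffer\n", __func__, hp->n_outbuf);

        if (!is_open(hp)) {
                /* protocol not up yet (or FSP died); poll again later
                 * rather than throwing the data away */
                schedule_delayed_work(&hp->writer, HZ);
                goto out;
        }

        hvsi_push(hp);
        if (hp->n_outbuf > 0) {
                schedule_delayed_work(&hp->writer, 10);
        } else {
                wake_up_all(&hp->emptyq);
                tty_wakeup(hp->tty);
        }

out:
        spin_unlock_irqrestore(&hp->lock, flags);
}
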
936 struct hvsi_struct *hp = tty->driver_data;
938 return N_OUTBUF - hp->n_outbuf;
943 struct hvsi_struct *hp = tty->driver_data;
945 return hp->n_outbuf;
951 struct hvsi_struct *hp = tty->driver_data;
957 spin_lock_irqsave(&hp->lock, flags);
959 pr_debug("%s: %i chars in buffer\n", __func__, hp->n_outbuf);
961 if (!is_open(hp)) {
968 * when the hypervisor buffer (16K) fills, data will stay in hp->outbuf
972 while ((count > 0) && (hvsi_write_room(hp->tty) > 0)) {
973 int chunksize = min(count, hvsi_write_room(hp->tty));
975 BUG_ON(hp->n_outbuf < 0);
976 memcpy(hp->outbuf + hp->n_outbuf, source, chunksize);
977 hp->n_outbuf += chunksize;
982 hvsi_push(hp);
985 if (hp->n_outbuf > 0) {
990 schedule_delayed_work(&hp->writer, 10);
994 spin_unlock_irqrestore(&hp->lock, flags);
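
Lines 951-994 show the write path run under hp->lock: copy the caller's data into hp->outbuf in chunks bounded by the remaining room, push after each chunk, and hand any leftover to the write worker. A sketch of just the inner loop and the deferral; 'source' and 'total' are assumed local names taken from the write() arguments:

        while ((count > 0) && (hvsi_write_room(hp->tty) > 0)) {
                int chunksize = min(count, hvsi_write_room(hp->tty));

                BUG_ON(hp->n_outbuf < 0);
                memcpy(hp->outbuf + hp->n_outbuf, source, chunksize);
                hp->n_outbuf += chunksize;

                total += chunksize;
                source += chunksize;
                count -= chunksize;

                hvsi_push(hp);
        }

        if (hp->n_outbuf > 0) {
                /* couldn't push it all: let the write worker finish the job */
                schedule_delayed_work(&hp->writer, 10);
        }
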
1009 struct hvsi_struct *hp = tty->driver_data;
1013 h_vio_signal(hp->vtermno, VIO_IRQ_DISABLE);
1018 struct hvsi_struct *hp = tty->driver_data;
1024 spin_lock_irqsave(&hp->lock, flags);
1025 if (hp->n_throttle) {
1026 hvsi_send_overflow(hp);
1029 spin_unlock_irqrestore(&hp->lock, flags);
1032 tty_flip_buffer_push(hp->tty);
1034 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
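
Lines 1009-1034 are flow control: throttling simply masks the port's interrupt at the hypervisor so no new data arrives, and unthrottling replays whatever was parked in throttle_buf before unmasking it again. A sketch; the 'shouldflip' local is an assumption:

static void hvsi_throttle(struct tty_struct *tty)
{
        struct hvsi_struct *hp = tty->driver_data;

        h_vio_signal(hp->vtermno, VIO_IRQ_DISABLE);
}

static void hvsi_unthrottle(struct tty_struct *tty)
{
        struct hvsi_struct *hp = tty->driver_data;
        unsigned long flags;
        int shouldflip = 0;

        spin_lock_irqsave(&hp->lock, flags);
        if (hp->n_throttle) {
                hvsi_send_overflow(hp);
                shouldflip = 1;
        }
        spin_unlock_irqrestore(&hp->lock, flags);

        if (shouldflip)
                tty_flip_buffer_push(hp->tty);

        h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
}
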
1039 struct hvsi_struct *hp = tty->driver_data;
1041 hvsi_get_mctrl(hp);
1042 return hp->mctrl;
1048 struct hvsi_struct *hp = tty->driver_data;
1056 spin_lock_irqsave(&hp->lock, flags);
1058 new_mctrl = (hp->mctrl & ~clear) | set;
1060 if (hp->mctrl != new_mctrl) {
1061 hvsi_set_mctrl(hp, new_mctrl);
1062 hp->mctrl = new_mctrl;
1064 spin_unlock_irqrestore(&hp->lock, flags);
1105 struct hvsi_struct *hp = &hvsi_ports[i];
1108 ret = request_irq(hp->virq, hvsi_interrupt, 0, "hvsi", hp);
1111 hp->virq, ret);
1129 struct hvsi_struct *hp = &hvsi_ports[console->index];
1135 if (!is_open(hp))
1153 ret = hvsi_put_chars(hp, c, i);
1170 struct hvsi_struct *hp;
1175 hp = &hvsi_ports[console->index];
1178 hvsi_close_protocol(hp);
1180 ret = hvsi_handshake(hp);
1184 ret = hvsi_get_mctrl(hp);
1188 ret = hvsi_set_mctrl(hp, hp->mctrl | TIOCM_DTR);
1192 hp->flags |= HVSI_CONSOLE;
1216 struct hvsi_struct *hp;
1229 hp = &hvsi_ports[hvsi_count];
1230 INIT_DELAYED_WORK(&hp->writer, hvsi_write_worker);
1231 INIT_WORK(&hp->handshaker, hvsi_handshaker);
1232 init_waitqueue_head(&hp->emptyq);
1233 init_waitqueue_head(&hp->stateq);
1234 spin_lock_init(&hp->lock);
1235 hp->index = hvsi_count;
1236 hp->inbuf_end = hp->inbuf;
1237 hp->state = HVSI_CLOSED;
1238 hp->vtermno = *vtermno;
1239 hp->virq = irq_create_mapping(NULL, irq[0]);
1240 if (hp->virq == NO_IRQ) {