client.c revision a8605ea2c20c2b97a54d7746c16ebef5ba29632a
1/*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2012, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13 * more details.
14 *
15 */
16
17#include <linux/sched.h>
18#include <linux/wait.h>
19#include <linux/delay.h>
20#include <linux/slab.h>
21#include <linux/pm_runtime.h>
22
23#include <linux/mei.h>
24
25#include "mei_dev.h"
26#include "hbm.h"
27#include "client.h"
28
29/**
30 * mei_me_cl_by_uuid - locate me client by uuid
31 *
32 * @dev: mei device
33 * @uuid: me client uuid
34 *
35 * Locking: called under "dev->device_lock" lock
36 *
37 * Return: me client or NULL if not found
38 */
39struct mei_me_client *mei_me_cl_by_uuid(const struct mei_device *dev,
40					const uuid_le *uuid)
41{
42	struct mei_me_client *me_cl;
43
44	list_for_each_entry(me_cl, &dev->me_clients, list)
45		if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0)
46			return me_cl;
47
48	return NULL;
49}
50
51/**
52 * mei_me_cl_by_id - locate me client by client id
53 *
54 * @dev: the device structure
55 * @client_id: me client id
56 *
57 * Locking: called under "dev->device_lock" lock
58 *
59 * Return: me client or NULL if not found
60 */
61struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
62{
63
64	struct mei_me_client *me_cl;
65
66	list_for_each_entry(me_cl, &dev->me_clients, list)
67		if (me_cl->client_id == client_id)
68			return me_cl;
69	return NULL;
70}
71
72/**
73 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
74 *
75 * @dev: the device structure
76 * @uuid: me client uuid
77 * @client_id: me client id
78 *
79 * Locking: called under "dev->device_lock" lock
80 *
81 * Return: me client or NULL if not found
82 */
83struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
84					   const uuid_le *uuid, u8 client_id)
85{
86	struct mei_me_client *me_cl;
87
88	list_for_each_entry(me_cl, &dev->me_clients, list)
89		if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0 &&
90		    me_cl->client_id == client_id)
91			return me_cl;
92	return NULL;
93}
94
95/**
96 * mei_me_cl_remove - remove me client matching uuid and client_id
97 *
98 * @dev: the device structure
99 * @uuid: me client uuid
100 * @client_id: me client address
101 */
102void mei_me_cl_remove(struct mei_device *dev, const uuid_le *uuid, u8 client_id)
103{
104	struct mei_me_client *me_cl, *next;
105
106	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list) {
107		if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0 &&
108		    me_cl->client_id == client_id) {
109			list_del(&me_cl->list);
110			kfree(me_cl);
111			break;
112		}
113	}
114}
115
116
117/**
118 * mei_cl_cmp_id - tells if the clients are the same
119 *
120 * @cl1: host client 1
121 * @cl2: host client 2
122 *
123 * Return: true  - if the clients has same host and me ids
124 *         false - otherwise
125 */
126static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
127				const struct mei_cl *cl2)
128{
129	return cl1 && cl2 &&
130		(cl1->host_client_id == cl2->host_client_id) &&
131		(cl1->me_client_id == cl2->me_client_id);
132}
133
134/**
135 * mei_io_list_flush - removes cbs belonging to cl.
136 *
137 * @list:  an instance of our list structure
138 * @cl:    host client, can be NULL for flushing the whole list
139 * @free:  whether to free the cbs
140 */
141static void __mei_io_list_flush(struct mei_cl_cb *list,
142				struct mei_cl *cl, bool free)
143{
144	struct mei_cl_cb *cb;
145	struct mei_cl_cb *next;
146
147	/* enable removing everything if no cl is specified */
148	list_for_each_entry_safe(cb, next, &list->list, list) {
149		if (!cl || (cb->cl && mei_cl_cmp_id(cl, cb->cl))) {
150			list_del(&cb->list);
151			if (free)
152				mei_io_cb_free(cb);
153		}
154	}
155}
156
157/**
158 * mei_io_list_flush - removes list entry belonging to cl.
159 *
160 * @list:  An instance of our list structure
161 * @cl: host client
162 */
163void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
164{
165	__mei_io_list_flush(list, cl, false);
166}
167
168
169/**
170 * mei_io_list_free - removes cb belonging to cl and free them
171 *
172 * @list:  An instance of our list structure
173 * @cl: host client
174 */
175static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
176{
177	__mei_io_list_flush(list, cl, true);
178}
179
180/**
181 * mei_io_cb_free - free mei_cb_private related memory
182 *
183 * @cb: mei callback struct
184 */
185void mei_io_cb_free(struct mei_cl_cb *cb)
186{
187	if (cb == NULL)
188		return;
189
190	kfree(cb->request_buffer.data);
191	kfree(cb->response_buffer.data);
192	kfree(cb);
193}
194
195/**
196 * mei_io_cb_init - allocate and initialize io callback
197 *
198 * @cl: mei client
199 * @fp: pointer to file structure
200 *
201 * Return: mei_cl_cb pointer or NULL;
202 */
203struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
204{
205	struct mei_cl_cb *cb;
206
207	cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
208	if (!cb)
209		return NULL;
210
211	mei_io_list_init(cb);
212
213	cb->file_object = fp;
214	cb->cl = cl;
215	cb->buf_idx = 0;
216	return cb;
217}
218
219/**
220 * mei_io_cb_alloc_req_buf - allocate request buffer
221 *
222 * @cb: io callback structure
223 * @length: size of the buffer
224 *
225 * Return: 0 on success
226 *         -EINVAL if cb is NULL
227 *         -ENOMEM if allocation failed
228 */
229int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
230{
231	if (!cb)
232		return -EINVAL;
233
234	if (length == 0)
235		return 0;
236
237	cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
238	if (!cb->request_buffer.data)
239		return -ENOMEM;
240	cb->request_buffer.size = length;
241	return 0;
242}
243/**
244 * mei_io_cb_alloc_resp_buf - allocate response buffer
245 *
246 * @cb: io callback structure
247 * @length: size of the buffer
248 *
249 * Return: 0 on success
250 *         -EINVAL if cb is NULL
251 *         -ENOMEM if allocation failed
252 */
253int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
254{
255	if (!cb)
256		return -EINVAL;
257
258	if (length == 0)
259		return 0;
260
261	cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
262	if (!cb->response_buffer.data)
263		return -ENOMEM;
264	cb->response_buffer.size = length;
265	return 0;
266}
267
268
269
270/**
271 * mei_cl_flush_queues - flushes queue lists belonging to cl.
272 *
273 * @cl: host client
274 */
275int mei_cl_flush_queues(struct mei_cl *cl)
276{
277	struct mei_device *dev;
278
279	if (WARN_ON(!cl || !cl->dev))
280		return -EINVAL;
281
282	dev = cl->dev;
283
284	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
285	mei_io_list_flush(&cl->dev->read_list, cl);
286	mei_io_list_free(&cl->dev->write_list, cl);
287	mei_io_list_free(&cl->dev->write_waiting_list, cl);
288	mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
289	mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
290	mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
291	mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
292	return 0;
293}
294
295
296/**
297 * mei_cl_init - initializes cl.
298 *
299 * @cl: host client to be initialized
300 * @dev: mei device
301 */
302void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
303{
304	memset(cl, 0, sizeof(struct mei_cl));
305	init_waitqueue_head(&cl->wait);
306	init_waitqueue_head(&cl->rx_wait);
307	init_waitqueue_head(&cl->tx_wait);
308	INIT_LIST_HEAD(&cl->link);
309	INIT_LIST_HEAD(&cl->device_link);
310	cl->reading_state = MEI_IDLE;
311	cl->writing_state = MEI_IDLE;
312	cl->dev = dev;
313}
314
315/**
316 * mei_cl_allocate - allocates cl  structure and sets it up.
317 *
318 * @dev: mei device
319 * Return:  The allocated file or NULL on failure
320 */
321struct mei_cl *mei_cl_allocate(struct mei_device *dev)
322{
323	struct mei_cl *cl;
324
325	cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
326	if (!cl)
327		return NULL;
328
329	mei_cl_init(cl, dev);
330
331	return cl;
332}
333
334/**
335 * mei_cl_find_read_cb - find this cl's callback in the read list
336 *
337 * @cl: host client
338 *
339 * Return: cb on success, NULL on error
340 */
341struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
342{
343	struct mei_device *dev = cl->dev;
344	struct mei_cl_cb *cb;
345
346	list_for_each_entry(cb, &dev->read_list.list, list)
347		if (mei_cl_cmp_id(cl, cb->cl))
348			return cb;
349	return NULL;
350}
351
352/** mei_cl_link: allocate host id in the host map
353 *
354 * @cl - host client
355 * @id - fixed host id or -1 for generic one
356 *
357 * Return: 0 on success
358 *	-EINVAL on incorrect values
359 *	-ENONET if client not found
360 */
361int mei_cl_link(struct mei_cl *cl, int id)
362{
363	struct mei_device *dev;
364	long open_handle_count;
365
366	if (WARN_ON(!cl || !cl->dev))
367		return -EINVAL;
368
369	dev = cl->dev;
370
371	/* If Id is not assigned get one*/
372	if (id == MEI_HOST_CLIENT_ID_ANY)
373		id = find_first_zero_bit(dev->host_clients_map,
374					MEI_CLIENTS_MAX);
375
376	if (id >= MEI_CLIENTS_MAX) {
377		dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
378		return -EMFILE;
379	}
380
381	open_handle_count = dev->open_handle_count + dev->iamthif_open_count;
382	if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
383		dev_err(dev->dev, "open_handle_count exceeded %d",
384			MEI_MAX_OPEN_HANDLE_COUNT);
385		return -EMFILE;
386	}
387
388	dev->open_handle_count++;
389
390	cl->host_client_id = id;
391	list_add_tail(&cl->link, &dev->file_list);
392
393	set_bit(id, dev->host_clients_map);
394
395	cl->state = MEI_FILE_INITIALIZING;
396
397	cl_dbg(dev, cl, "link cl\n");
398	return 0;
399}
400
401/**
402 * mei_cl_unlink - remove me_cl from the list
403 *
404 * @cl: host client
405 */
406int mei_cl_unlink(struct mei_cl *cl)
407{
408	struct mei_device *dev;
409
410	/* don't shout on error exit path */
411	if (!cl)
412		return 0;
413
414	/* wd and amthif might not be initialized */
415	if (!cl->dev)
416		return 0;
417
418	dev = cl->dev;
419
420	cl_dbg(dev, cl, "unlink client");
421
422	if (dev->open_handle_count > 0)
423		dev->open_handle_count--;
424
425	/* never clear the 0 bit */
426	if (cl->host_client_id)
427		clear_bit(cl->host_client_id, dev->host_clients_map);
428
429	list_del_init(&cl->link);
430
431	cl->state = MEI_FILE_INITIALIZING;
432
433	return 0;
434}
435
436
437void mei_host_client_init(struct work_struct *work)
438{
439	struct mei_device *dev = container_of(work,
440					      struct mei_device, init_work);
441	struct mei_me_client *me_cl;
442	struct mei_client_properties *props;
443
444	mutex_lock(&dev->device_lock);
445
446	list_for_each_entry(me_cl, &dev->me_clients, list) {
447		props = &me_cl->props;
448
449		if (!uuid_le_cmp(props->protocol_name, mei_amthif_guid))
450			mei_amthif_host_init(dev);
451		else if (!uuid_le_cmp(props->protocol_name, mei_wd_guid))
452			mei_wd_host_init(dev);
453		else if (!uuid_le_cmp(props->protocol_name, mei_nfc_guid))
454			mei_nfc_host_init(dev);
455
456	}
457
458	dev->dev_state = MEI_DEV_ENABLED;
459	dev->reset_count = 0;
460
461	mutex_unlock(&dev->device_lock);
462
463	pm_runtime_mark_last_busy(dev->dev);
464	dev_dbg(dev->dev, "rpm: autosuspend\n");
465	pm_runtime_autosuspend(dev->dev);
466}
467
468/**
469 * mei_hbuf_acquire - try to acquire host buffer
470 *
471 * @dev: the device structure
472 * Return: true if host buffer was acquired
473 */
474bool mei_hbuf_acquire(struct mei_device *dev)
475{
476	if (mei_pg_state(dev) == MEI_PG_ON ||
477	    dev->pg_event == MEI_PG_EVENT_WAIT) {
478		dev_dbg(dev->dev, "device is in pg\n");
479		return false;
480	}
481
482	if (!dev->hbuf_is_ready) {
483		dev_dbg(dev->dev, "hbuf is not ready\n");
484		return false;
485	}
486
487	dev->hbuf_is_ready = false;
488
489	return true;
490}
491
/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock; the lock is dropped
 * and re-acquired around the wait for the FW reply.
 *
 * Return: 0 on success (or when no disconnect is pending), <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	/* nothing to do unless a disconnect was actually requested */
	if (cl->state != MEI_FILE_DISCONNECTING)
		return 0;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	/* control cb: no file object and no data buffers */
	cb = mei_io_cb_init(cl, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto free;
	}

	cb->fop_type = MEI_FOP_DISCONNECT;

	if (mei_hbuf_acquire(dev)) {
		/* host buffer available: send the request immediately */
		if (mei_hbm_cl_disconnect_req(dev, cl)) {
			rets = -ENODEV;
			cl_err(dev, cl, "failed to disconnect.\n");
			goto free;
		}
		cl->timer_count = MEI_CONNECT_TIMEOUT;
		/* NOTE(review): busy-wait with device_lock held; presumably
		 * gives the hardware time to settle - confirm it is needed */
		mdelay(10); /* Wait for hardware disconnection ready */
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		/* queue for the irq thread to send once the buffer frees up */
		cl_dbg(dev, cl, "add disconnect cb to control write list\n");
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);

	}
	mutex_unlock(&dev->device_lock);

	wait_event_timeout(cl->wait,
			MEI_FILE_DISCONNECTED == cl->state,
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));

	mutex_lock(&dev->device_lock);

	if (MEI_FILE_DISCONNECTED == cl->state) {
		rets = 0;
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
	} else {
		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
		rets = -ETIME;
	}

	/* drop any control cbs still queued for this client */
	mei_io_list_flush(&dev->ctrl_rd_list, cl);
	mei_io_list_flush(&dev->ctrl_wr_list, cl);
free:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	/* cb may be NULL here; mei_io_cb_free handles that */
	mei_io_cb_free(cb);
	return rets;
}
572
573
574/**
575 * mei_cl_is_other_connecting - checks if other
576 *    client with the same me client id is connecting
577 *
578 * @cl: private data of the file object
579 *
580 * Return: true if other client is connected, false - otherwise.
581 */
582bool mei_cl_is_other_connecting(struct mei_cl *cl)
583{
584	struct mei_device *dev;
585	struct mei_cl *ocl; /* the other client */
586
587	if (WARN_ON(!cl || !cl->dev))
588		return false;
589
590	dev = cl->dev;
591
592	list_for_each_entry(ocl, &dev->file_list, link) {
593		if (ocl->state == MEI_FILE_CONNECTING &&
594		    ocl != cl &&
595		    cl->me_client_id == ocl->me_client_id)
596			return true;
597
598	}
599
600	return false;
601}
602
/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @file: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock; the lock is dropped
 * and re-acquired around the wait for the FW reply.
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct file *file)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	/* control cb tied to the opening file object */
	cb = mei_io_cb_init(cl, file);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	cb->fop_type = MEI_FOP_CONNECT;

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		cl->state = MEI_FILE_CONNECTING;
		if (mei_hbm_cl_connect_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		cl->timer_count = MEI_CONNECT_TIMEOUT;
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		/* defer: the irq thread will send the request later */
		cl->state = MEI_FILE_INITIALIZING;
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			(cl->state == MEI_FILE_CONNECTED ||
			 cl->state == MEI_FILE_DISCONNECTED),
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (cl->state != MEI_FILE_CONNECTED) {
		cl->state = MEI_FILE_DISCONNECTED;
		/* something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;

		/* drop the now-stale control cbs for this client */
		mei_io_list_flush(&dev->ctrl_rd_list, cl);
		mei_io_list_flush(&dev->ctrl_wr_list, cl);
	}

	rets = cl->status;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	/* cb may be NULL here; mei_io_cb_free handles that */
	mei_io_cb_free(cb);
	return rets;
}
680
681/**
682 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
683 *
684 * @cl: private data of the file object
685 *
686 * Return: 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
687 *	-ENOENT if mei_cl is not present
688 *	-EINVAL if single_recv_buf == 0
689 */
690int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
691{
692	struct mei_device *dev;
693	struct mei_me_client *me_cl;
694
695	if (WARN_ON(!cl || !cl->dev))
696		return -EINVAL;
697
698	dev = cl->dev;
699
700	if (cl->mei_flow_ctrl_creds > 0)
701		return 1;
702
703	me_cl = mei_me_cl_by_id(dev, cl->me_client_id);
704	if (!me_cl) {
705		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
706		return -ENOENT;
707	}
708
709	if (me_cl->mei_flow_ctrl_creds) {
710		if (WARN_ON(me_cl->props.single_recv_buf == 0))
711			return -EINVAL;
712		return 1;
713	}
714	return 0;
715}
716
717/**
718 * mei_cl_flow_ctrl_reduce - reduces flow_control.
719 *
720 * @cl: private data of the file object
721 *
722 * Return:
723 *	0 on success
724 *	-ENOENT when me client is not found
725 *	-EINVAL when ctrl credits are <= 0
726 */
727int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
728{
729	struct mei_device *dev;
730	struct mei_me_client *me_cl;
731
732	if (WARN_ON(!cl || !cl->dev))
733		return -EINVAL;
734
735	dev = cl->dev;
736
737	me_cl = mei_me_cl_by_id(dev, cl->me_client_id);
738	if (!me_cl) {
739		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
740		return -ENOENT;
741	}
742
743	if (me_cl->props.single_recv_buf) {
744		if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
745			return -EINVAL;
746		me_cl->mei_flow_ctrl_creds--;
747	} else {
748		if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
749			return -EINVAL;
750		cl->mei_flow_ctrl_creds--;
751	}
752	return 0;
753}
754
/**
 * mei_cl_read_start - the start read client message function.
 *
 * @cl: host client
 * @length: requested read length; rounded up to the me client's
 *          maximum message length
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	struct mei_me_client *me_cl;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	/* only one read may be outstanding per client */
	if (cl->read_cb) {
		cl_dbg(dev, cl, "read is pending.\n");
		return -EBUSY;
	}
	me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
	if (!me_cl) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return  -ENOTTY;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_io_cb_init(cl, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* always allocate at least client max message */
	length = max_t(size_t, length, me_cl->props.max_msg_length);
	rets = mei_io_cb_alloc_resp_buf(cb, length);
	if (rets)
		goto out;

	cb->fop_type = MEI_FOP_READ;
	if (mei_hbuf_acquire(dev)) {
		/* grant the FW a credit to send us data right away */
		rets = mei_hbm_cl_flow_control_req(dev, cl);
		if (rets < 0)
			goto out;

		list_add_tail(&cb->list, &dev->read_list.list);
	} else {
		/* defer: the irq thread will send the flow control request */
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	cl->read_cb = cb;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	/* on failure the cb was never queued; release it */
	if (rets)
		mei_io_cb_free(cb);

	return rets;
}
829
830/**
831 * mei_cl_irq_write - write a message to device
832 *	from the interrupt thread context
833 *
834 * @cl: client
835 * @cb: callback block.
836 * @cmpl_list: complete list.
837 *
838 * Return: 0, OK; otherwise error.
839 */
840int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
841		     struct mei_cl_cb *cmpl_list)
842{
843	struct mei_device *dev;
844	struct mei_msg_data *buf;
845	struct mei_msg_hdr mei_hdr;
846	size_t len;
847	u32 msg_slots;
848	int slots;
849	int rets;
850
851	if (WARN_ON(!cl || !cl->dev))
852		return -ENODEV;
853
854	dev = cl->dev;
855
856	buf = &cb->request_buffer;
857
858	rets = mei_cl_flow_ctrl_creds(cl);
859	if (rets < 0)
860		return rets;
861
862	if (rets == 0) {
863		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
864		return 0;
865	}
866
867	slots = mei_hbuf_empty_slots(dev);
868	len = buf->size - cb->buf_idx;
869	msg_slots = mei_data2slots(len);
870
871	mei_hdr.host_addr = cl->host_client_id;
872	mei_hdr.me_addr = cl->me_client_id;
873	mei_hdr.reserved = 0;
874	mei_hdr.internal = cb->internal;
875
876	if (slots >= msg_slots) {
877		mei_hdr.length = len;
878		mei_hdr.msg_complete = 1;
879	/* Split the message only if we can write the whole host buffer */
880	} else if (slots == dev->hbuf_depth) {
881		msg_slots = slots;
882		len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
883		mei_hdr.length = len;
884		mei_hdr.msg_complete = 0;
885	} else {
886		/* wait for next time the host buffer is empty */
887		return 0;
888	}
889
890	cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
891			cb->request_buffer.size, cb->buf_idx);
892
893	rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
894	if (rets) {
895		cl->status = rets;
896		list_move_tail(&cb->list, &cmpl_list->list);
897		return rets;
898	}
899
900	cl->status = 0;
901	cl->writing_state = MEI_WRITING;
902	cb->buf_idx += mei_hdr.length;
903
904	if (mei_hdr.msg_complete) {
905		if (mei_cl_flow_ctrl_reduce(cl))
906			return -EIO;
907		list_move_tail(&cb->list, &dev->write_waiting_list.list);
908	}
909
910	return 0;
911}
912
/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 * @blocking: block until the write completes (lock is dropped while waiting)
 *
 * Return: number of bytes sent on success, <0 on failure.
 */
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	int rets;


	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;


	buf = &cb->request_buffer;

	cl_dbg(dev, cl, "size=%d\n", buf->size);

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb->fop_type = MEI_FOP_WRITE;
	cb->buf_idx = 0;
	cl->writing_state = MEI_IDLE;

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;
	mei_hdr.msg_complete = 0;
	mei_hdr.internal = cb->internal;

	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	/* no credits or no buffer: queue the cb for the irq thread and
	 * report the full size as accepted */
	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		rets = buf->size;
		goto out;
	}
	if (!mei_hbuf_acquire(dev)) {
		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
		rets = buf->size;
		goto out;
	}

	/* Check for a maximum length */
	if (buf->size > mei_hbuf_max_len(dev)) {
		mei_hdr.length = mei_hbuf_max_len(dev);
		mei_hdr.msg_complete = 0;
	} else {
		mei_hdr.length = buf->size;
		mei_hdr.msg_complete = 1;
	}

	rets = mei_write_message(dev, &mei_hdr, buf->data);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = mei_hdr.length;

out:
	if (mei_hdr.msg_complete) {
		/* whole message sent: consume a flow control credit */
		rets = mei_cl_flow_ctrl_reduce(cl);
		if (rets < 0)
			goto err;

		list_add_tail(&cb->list, &dev->write_waiting_list.list);
	} else {
		/* partial or unsent: irq thread continues from write_list */
		list_add_tail(&cb->list, &dev->write_list.list);
	}


	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE);
		mutex_lock(&dev->device_lock);
		/* wait_event_interruptible returns -ERESTARTSYS */
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto err;
		}
	}

	rets = buf->size;
err:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return rets;
}
1025
1026
1027/**
1028 * mei_cl_complete - processes completed operation for a client
1029 *
1030 * @cl: private data of the file object.
1031 * @cb: callback block.
1032 */
1033void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
1034{
1035	if (cb->fop_type == MEI_FOP_WRITE) {
1036		mei_io_cb_free(cb);
1037		cb = NULL;
1038		cl->writing_state = MEI_WRITE_COMPLETE;
1039		if (waitqueue_active(&cl->tx_wait))
1040			wake_up_interruptible(&cl->tx_wait);
1041
1042	} else if (cb->fop_type == MEI_FOP_READ &&
1043			MEI_READING == cl->reading_state) {
1044		cl->reading_state = MEI_READ_COMPLETE;
1045		if (waitqueue_active(&cl->rx_wait))
1046			wake_up_interruptible(&cl->rx_wait);
1047		else
1048			mei_cl_bus_rx_event(cl);
1049
1050	}
1051}
1052
1053
1054/**
1055 * mei_cl_all_disconnect - disconnect forcefully all connected clients
1056 *
1057 * @dev: mei device
1058 */
1059
1060void mei_cl_all_disconnect(struct mei_device *dev)
1061{
1062	struct mei_cl *cl;
1063
1064	list_for_each_entry(cl, &dev->file_list, link) {
1065		cl->state = MEI_FILE_DISCONNECTED;
1066		cl->mei_flow_ctrl_creds = 0;
1067		cl->timer_count = 0;
1068	}
1069}
1070
1071
1072/**
1073 * mei_cl_all_wakeup  - wake up all readers and writers they can be interrupted
1074 *
1075 * @dev: mei device
1076 */
1077void mei_cl_all_wakeup(struct mei_device *dev)
1078{
1079	struct mei_cl *cl;
1080
1081	list_for_each_entry(cl, &dev->file_list, link) {
1082		if (waitqueue_active(&cl->rx_wait)) {
1083			cl_dbg(dev, cl, "Waking up reading client!\n");
1084			wake_up_interruptible(&cl->rx_wait);
1085		}
1086		if (waitqueue_active(&cl->tx_wait)) {
1087			cl_dbg(dev, cl, "Waking up writing client!\n");
1088			wake_up_interruptible(&cl->tx_wait);
1089		}
1090	}
1091}
1092
1093/**
1094 * mei_cl_all_write_clear - clear all pending writes
1095 *
1096 * @dev: mei device
1097 */
1098void mei_cl_all_write_clear(struct mei_device *dev)
1099{
1100	mei_io_list_free(&dev->write_list, NULL);
1101	mei_io_list_free(&dev->write_waiting_list, NULL);
1102}
1103
1104
1105