/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include "../include/obd.h"
#include "../include/lustre_lite.h"

#include "vvp_internal.h"

/*****************************************************************************
 *
 * Page operations.
 *
 */

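/*
 * Release the reference on the VM page that was taken in vvp_page_init().
 */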
static void vvp_page_fini_common(struct ccc_page *cp)
{
	struct page *vmpage = cp->cpg_page;

	LASSERT(vmpage != NULL);
	page_cache_release(vmpage);
}

static void vvp_page_fini(const struct lu_env *env,
			  struct cl_page_slice *slice)
{
	struct ccc_page *cp = cl2ccc_page(slice);
	struct page *vmpage = cp->cpg_page;

	/*
	 * vmpage->private was already cleared when page was moved into
	 * VPG_FREEING state.
	 */
	LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
	vvp_page_fini_common(cp);
}

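/*
 * Implements cl_page_operations::cpo_own().
 *
 * Locks the underlying VM page and waits for any writeback to finish.  In
 * non-blocking mode, -EAGAIN is returned if the page cannot be locked
 * immediately or is still under writeback.
 */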
static int vvp_page_own(const struct lu_env *env,
			const struct cl_page_slice *slice, struct cl_io *io,
			int nonblock)
{
	struct ccc_page *vpg = cl2ccc_page(slice);
	struct page *vmpage = vpg->cpg_page;

	LASSERT(vmpage != NULL);
	if (nonblock) {
		if (!trylock_page(vmpage))
			return -EAGAIN;

		if (unlikely(PageWriteback(vmpage))) {
			unlock_page(vmpage);
			return -EAGAIN;
		}

		return 0;
	}

	lock_page(vmpage);
	wait_on_page_writeback(vmpage);
	return 0;
}

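/*
 * Implements cl_page_operations::cpo_assume().
 *
 * The caller is expected to hold the VM page lock already; only wait for
 * any pending writeback to complete.
 */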
static void vvp_page_assume(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));
	wait_on_page_writeback(vmpage);
}

static void vvp_page_unassume(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));
}

static void vvp_page_disown(const struct lu_env *env,
			    const struct cl_page_slice *slice, struct cl_io *io)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));

	unlock_page(vmpage);
}

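/*
 * Implements cl_page_operations::cpo_discard().
 *
 * Accounts a read-ahead page that is being discarded before it was used,
 * then drops the page from the page cache.
 */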
static void vvp_page_discard(const struct lu_env *env,
			     const struct cl_page_slice *slice,
			     struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);
	struct address_space *mapping;
	struct ccc_page *cpg = cl2ccc_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));

	mapping = vmpage->mapping;

	if (cpg->cpg_defer_uptodate && !cpg->cpg_ra_used)
		ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);

	/*
	 * truncate_complete_page() calls
	 * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
	 */
	truncate_complete_page(mapping, vmpage);
}

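/*
 * Implements cl_page_operations::cpo_unmap().
 *
 * Tears down any user-space mappings of the page range before the page is
 * transferred or discarded.
 */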
static int vvp_page_unmap(const struct lu_env *env,
			  const struct cl_page_slice *slice,
			  struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);
	__u64 offset;

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));

	offset = vmpage->index << PAGE_CACHE_SHIFT;

	/*
	 * XXX is it safe to call this with the page lock held?
	 */
	ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_CACHE_SIZE);
	return 0;
}

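/*
 * Implements cl_page_operations::cpo_delete().
 *
 * Called when the cl_page is being freed; severs the link from the VM page
 * back to the cl_page.
 */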
static void vvp_page_delete(const struct lu_env *env,
			    const struct cl_page_slice *slice)
{
	struct page *vmpage = cl2vm_page(slice);
	struct inode *inode = vmpage->mapping->host;
	struct cl_object *obj = slice->cpl_obj;

	LASSERT(PageLocked(vmpage));
	LASSERT((struct cl_page *)vmpage->private == slice->cpl_page);
	LASSERT(inode == ccc_object_inode(obj));

	vvp_write_complete(cl2ccc(obj), cl2ccc_page(slice));
	ClearPagePrivate(vmpage);
	vmpage->private = 0;
	/*
	 * Reference from vmpage to cl_page is removed, but the reference back
	 * is still here. It is removed later in vvp_page_fini().
	 */
}

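/*
 * Implements cl_page_operations::cpo_export().
 *
 * Propagates the cl_page up-to-date state to the VM page's PG_uptodate
 * flag.
 */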
static void vvp_page_export(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    int uptodate)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));
	if (uptodate)
		SetPageUptodate(vmpage);
	else
		ClearPageUptodate(vmpage);
}

static int vvp_page_is_vmlocked(const struct lu_env *env,
				const struct cl_page_slice *slice)
{
	return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA;
}

static int vvp_page_prep_read(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      struct cl_io *unused)
{
	/* Skip the page already marked as PG_uptodate. */
	return PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0;
}

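/*
 * Implements cl_page_operations::cpo_prep() for writes.
 *
 * Marks the VM page as under writeback and records it as write-pending via
 * vvp_write_pending() before the transfer is started.
 */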
static int vvp_page_prep_write(const struct lu_env *env,
			       const struct cl_page_slice *slice,
			       struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(PageLocked(vmpage));
	LASSERT(!PageDirty(vmpage));

	set_page_writeback(vmpage);
	vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));

	return 0;
}

/**
 * Handles page transfer errors at VM level.
 *
 * This takes the inode as a separate argument because the inode on which the
 * error is to be set can be different from the \a vmpage inode in the case of
 * direct I/O.
 */
static void vvp_vmpage_error(struct inode *inode, struct page *vmpage,
			     int ioret)
{
	struct ccc_object *obj = cl_inode2ccc(inode);

	if (ioret == 0) {
		ClearPageError(vmpage);
		obj->cob_discard_page_warned = 0;
	} else {
		SetPageError(vmpage);
		if (ioret == -ENOSPC)
			set_bit(AS_ENOSPC, &inode->i_mapping->flags);
		else
			set_bit(AS_EIO, &inode->i_mapping->flags);

		if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
		    obj->cob_discard_page_warned == 0) {
			obj->cob_discard_page_warned = 1;
			ll_dirty_page_discard_warn(vmpage, ioret);
		}
	}
}

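/*
 * Implements cl_page_operations::cpo_completion() for reads.
 *
 * On success the page is exported as up to date (unless read-ahead already
 * deferred that); on error the deferred up-to-date flag is dropped.  The VM
 * page is unlocked here only for asynchronous reads.
 */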
static void vvp_page_completion_read(const struct lu_env *env,
				     const struct cl_page_slice *slice,
				     int ioret)
{
	struct ccc_page *cp = cl2ccc_page(slice);
	struct page *vmpage = cp->cpg_page;
	struct cl_page *page = cl_page_top(slice->cpl_page);
	struct inode *inode = ccc_object_inode(page->cp_obj);

	LASSERT(PageLocked(vmpage));
	CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);

	if (cp->cpg_defer_uptodate)
		ll_ra_count_put(ll_i2sbi(inode), 1);

	if (ioret == 0) {
		if (!cp->cpg_defer_uptodate)
			cl_page_export(env, page, 1);
	} else {
		cp->cpg_defer_uptodate = 0;
	}

	if (page->cp_sync_io == NULL)
		unlock_page(vmpage);
}

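/*
 * Implements cl_page_operations::cpo_completion() for writes.
 *
 * Clears the queued-for-write state, records any transfer error on the VM
 * page for asynchronous writes, and ends writeback on the VM page.
 */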
static void vvp_page_completion_write(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      int ioret)
{
	struct ccc_page *cp = cl2ccc_page(slice);
	struct cl_page *pg = slice->cpl_page;
	struct page *vmpage = cp->cpg_page;

	LASSERT(ergo(pg->cp_sync_io != NULL, PageLocked(vmpage)));
	LASSERT(PageWriteback(vmpage));

	CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);

	/*
	 * TODO: Actually it makes sense to add the page into the oap pending
	 * list again, so that we don't need to take the page out of the SoM
	 * write pending list if we just hit a recoverable error such as
	 * -ENOMEM.
	 * To implement this, we just need to return a non-zero value from the
	 * ->cpo_completion method. The underlying transfer should be notified
	 * and then re-add the page into the pending transfer queue.  -jay
	 */

	cp->cpg_write_queued = 0;
	vvp_write_complete(cl2ccc(slice->cpl_obj), cp);

	/*
	 * Only mark the page error when it's an async write, because
	 * applications won't wait for IO to finish.
	 */
	if (pg->cp_sync_io == NULL)
		vvp_vmpage_error(ccc_object_inode(pg->cp_obj), vmpage, ioret);

	end_page_writeback(vmpage);
}

/**
 * Implements cl_page_operations::cpo_make_ready() method.
 *
 * This is called to yank a page from the transfer cache and to send it out as
 * a part of transfer. This function try-locks the page. If try-lock fails,
 * the page is owned by some concurrent IO and should be skipped (this is bad,
 * but hopefully a rare situation, as it usually results in the transfer being
 * shorter than possible).
 *
 * \retval 0       success, page can be placed into transfer
 *
 * \retval -EAGAIN page is either used by concurrent IO or has been
 * truncated. Skip it.
 */
static int vvp_page_make_ready(const struct lu_env *env,
			       const struct cl_page_slice *slice)
{
	struct page *vmpage = cl2vm_page(slice);
	struct cl_page *pg = slice->cpl_page;
	int result = 0;

	lock_page(vmpage);
	if (clear_page_dirty_for_io(vmpage)) {
		LASSERT(pg->cp_state == CPS_CACHED);
		/* This actually clears the dirty bit in the radix tree. */
		set_page_writeback(vmpage);
		vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));
		CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
	} else if (pg->cp_state == CPS_PAGEOUT) {
		/* is it possible for osc_flush_async_page() to already
		 * make it ready? */
		result = -EALREADY;
	} else {
		CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpected page state %d.\n",
			      pg->cp_state);
		LBUG();
	}
	unlock_page(vmpage);
	return result;
}

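/*
 * Implements cl_page_operations::cpo_print().
 *
 * Dumps the VVP page state and the main VM page fields for debugging.
 */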
static int vvp_page_print(const struct lu_env *env,
			  const struct cl_page_slice *slice,
			  void *cookie, lu_printer_t printer)
{
	struct ccc_page *vp = cl2ccc_page(slice);
	struct page *vmpage = vp->cpg_page;

	(*printer)(env, cookie,
		   LUSTRE_VVP_NAME"-page@%p(%d:%d:%d) vm@%p ",
		   vp, vp->cpg_defer_uptodate, vp->cpg_ra_used,
		   vp->cpg_write_queued, vmpage);
	if (vmpage != NULL) {
		(*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
			   (long)vmpage->flags, page_count(vmpage),
			   page_mapcount(vmpage), vmpage->private,
			   page_index(vmpage),
			   list_empty(&vmpage->lru) ? "not-" : "");
	}
	(*printer)(env, cookie, "\n");
	return 0;
}

static const struct cl_page_operations vvp_page_ops = {
	.cpo_own           = vvp_page_own,
	.cpo_assume        = vvp_page_assume,
	.cpo_unassume      = vvp_page_unassume,
	.cpo_disown        = vvp_page_disown,
	.cpo_vmpage        = ccc_page_vmpage,
	.cpo_discard       = vvp_page_discard,
	.cpo_delete        = vvp_page_delete,
	.cpo_unmap         = vvp_page_unmap,
	.cpo_export        = vvp_page_export,
	.cpo_is_vmlocked   = vvp_page_is_vmlocked,
	.cpo_fini          = vvp_page_fini,
	.cpo_print         = vvp_page_print,
	.cpo_is_under_lock = ccc_page_is_under_lock,
	.io = {
		[CRT_READ] = {
			.cpo_prep       = vvp_page_prep_read,
			.cpo_completion = vvp_page_completion_read,
			.cpo_make_ready = ccc_fail,
		},
		[CRT_WRITE] = {
			.cpo_prep       = vvp_page_prep_write,
			.cpo_completion = vvp_page_completion_write,
			.cpo_make_ready = vvp_page_make_ready,
		}
	}
};

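/*
 * Transient pages are only created and used with the inode mutex held, so
 * verify that the mutex cannot be acquired here.
 */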
static void vvp_transient_page_verify(const struct cl_page *page)
{
	struct inode *inode = ccc_object_inode(page->cp_obj);

	LASSERT(!mutex_trylock(&inode->i_mutex));
}

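/*
 * The transient page ownership methods have no VM page state to manipulate;
 * they only verify that the inode mutex is held.
 */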
static int vvp_transient_page_own(const struct lu_env *env,
				  const struct cl_page_slice *slice,
				  struct cl_io *unused, int nonblock)
{
	vvp_transient_page_verify(slice->cpl_page);
	return 0;
}

static void vvp_transient_page_assume(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      struct cl_io *unused)
{
	vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_unassume(const struct lu_env *env,
					const struct cl_page_slice *slice,
					struct cl_io *unused)
{
	vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_disown(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      struct cl_io *unused)
{
	vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_discard(const struct lu_env *env,
				       const struct cl_page_slice *slice,
				       struct cl_io *unused)
{
	struct cl_page *page = slice->cpl_page;

	vvp_transient_page_verify(slice->cpl_page);

	/*
	 * A transient page is simply removed from the radix tree here.
	 */
	cl_page_delete(env, page);
}

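/*
 * Implements cl_page_operations::cpo_is_vmlocked() for transient pages.
 *
 * The inode mutex stands in for the VM page lock: the page is reported as
 * locked whenever the mutex is currently held.
 */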
static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
					   const struct cl_page_slice *slice)
{
	struct inode *inode = ccc_object_inode(slice->cpl_obj);
	int locked;

	locked = !mutex_trylock(&inode->i_mutex);
	if (!locked)
		mutex_unlock(&inode->i_mutex);
	return locked ? -EBUSY : -ENODATA;
}

static void
vvp_transient_page_completion(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      int ioret)
{
	vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_fini(const struct lu_env *env,
				    struct cl_page_slice *slice)
{
	struct ccc_page *cp = cl2ccc_page(slice);
	struct cl_page *clp = slice->cpl_page;
	struct ccc_object *clobj = cl2ccc(clp->cp_obj);

	vvp_page_fini_common(cp);
	LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex));
	clobj->cob_transient_pages--;
}

static const struct cl_page_operations vvp_transient_page_ops = {
	.cpo_own           = vvp_transient_page_own,
	.cpo_assume        = vvp_transient_page_assume,
	.cpo_unassume      = vvp_transient_page_unassume,
	.cpo_disown        = vvp_transient_page_disown,
	.cpo_discard       = vvp_transient_page_discard,
	.cpo_vmpage        = ccc_page_vmpage,
	.cpo_fini          = vvp_transient_page_fini,
	.cpo_is_vmlocked   = vvp_transient_page_is_vmlocked,
	.cpo_print         = vvp_page_print,
	.cpo_is_under_lock = ccc_page_is_under_lock,
	.io = {
		[CRT_READ] = {
			.cpo_prep       = ccc_transient_page_prep,
			.cpo_completion = vvp_transient_page_completion,
		},
		[CRT_WRITE] = {
			.cpo_prep       = ccc_transient_page_prep,
			.cpo_completion = vvp_transient_page_completion,
		}
	}
};

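/*
 * Initializes the VVP layer of a cl_page: takes a page cache reference on
 * the VM page and attaches either the cacheable or the transient page
 * operations, depending on the page type.
 */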
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_page *page, struct page *vmpage)
{
	struct ccc_page *cpg = cl_object_page_slice(obj, page);

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	cpg->cpg_page = vmpage;
	page_cache_get(vmpage);

	INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
	if (page->cp_type == CPT_CACHEABLE) {
		SetPagePrivate(vmpage);
		vmpage->private = (unsigned long)page;
		cl_page_slice_add(page, &cpg->cpg_cl, obj, &vvp_page_ops);
	} else {
		struct ccc_object *clobj = cl2ccc(obj);

		LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex));
		cl_page_slice_add(page, &cpg->cpg_cl, obj,
				  &vvp_transient_page_ops);
		clobj->cob_transient_pages++;
	}
	return 0;
}