Searched defs:task (Results 1 - 7 of 7) sorted by relevance

/mm/
H A Dprocess_vm_access.c25 * process_vm_rw_pages - read/write pages from task specified
26 * @task: task to read/write from
27 * @mm: mm for task
30 * @pa: address of page in task to start copying from/to
42 static int process_vm_rw_pages(struct task_struct *task, argument
68 pages_pinned = get_user_pages(task, mm, pa,
155 * process_vm_rw_single_vec - read/write pages from task specified
164 * @mm: mm for task
165 * @task
170 process_vm_rw_single_vec(unsigned long addr, unsigned long len, const struct iovec *lvec, unsigned long lvec_cnt, unsigned long *lvec_current, size_t *lvec_offset, struct page **process_pages, struct mm_struct *mm, struct task_struct *task, int vm_write, ssize_t *bytes_copied) argument
247 struct task_struct *task; local
[all...]
H A Doom_kill.c90 * has_intersects_mems_allowed() - check task eligibility for kill
91 * @tsk: task struct of which task to consider
94 * Task eligibility is determined by whether or not a candidate task, @tsk,
153 /* return true if the task is not adequate as candidate victim task. */
174 * oom_badness - heuristic function to determine which candidate task to kill
175 * @p: task struct of which task we should calculate
178 * The heuristic for determining which task t
393 struct task_struct *task; local
[all...]
H A Dbacking-dev.c318 if (bdi->wb.task) {
320 wake_up_process(bdi->wb.task);
328 wake_up_process(default_backing_dev_info.wb.task);
388 struct task_struct *task = NULL; local
409 * waking us up to do work for them. Set the task state here
431 if (!bdi->wb.task && have_dirty_io) {
449 if (bdi->wb.task && !have_dirty_io &&
452 task = bdi->wb.task;
453 bdi->wb.task
587 struct task_struct *task; local
[all...]
H A Dmigrate.c1179 static int do_pages_move(struct mm_struct *mm, struct task_struct *task, argument
1191 task_nodes = cpuset_mems_allowed(task);
1351 struct task_struct *task; local
1364 task = pid ? find_task_by_vpid(pid) : current;
1365 if (!task) {
1369 mm = get_task_mm(task);
1382 tcred = __task_cred(task);
1392 err = security_task_movememory(task);
1397 err = do_pages_move(mm, task, nr_pages, pages, nodes, status,
H A Dmemory.c128 static void __sync_task_rss_stat(struct task_struct *task, struct mm_struct *mm) argument
133 if (task->rss_stat.count[i]) {
134 add_mm_counter(mm, i, task->rss_stat.count[i]);
135 task->rss_stat.count[i] = 0;
138 task->rss_stat.events = 0;
143 struct task_struct *task = current; local
145 if (likely(task->mm == mm))
146 task->rss_stat.count[member] += val;
155 static void check_sync_rss_stat(struct task_struct *task) argument
157 if (unlikely(task !
181 sync_mm_rss(struct task_struct *task, struct mm_struct *mm) argument
190 check_sync_rss_stat(struct task_struct *task) argument
[all...]
H A Dmempolicy.c123 * If read-side task has no lock to protect task->mempolicy, write-side
124 * task will rebind the task->mempolicy by two step. The first step is
128 * If we have a lock to protect task->mempolicy in read-side, we do
204 * Must be called holding task's alloc_lock to protect task's mems_allowed
375 * If read-side task has no lock to protect task->mempolicy, write-side
376 * task wil
1296 struct task_struct *task; local
1499 get_vma_policy(struct task_struct *task, struct vm_area_struct *vma, unsigned long addr) argument
[all...]
H A Dmemcontrol.c278 * Should we move charges of a task when a task is moved into this
298 /* Stuffs for move charges at task migration. */
317 struct task_struct *moving_task; /* a task moving charges */
390 * the current task's memcg won't help us in this case.
1123 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg) argument
1129 p = find_lock_task_mm(task);
1139 task_lock(task);
1140 curr = mem_cgroup_from_task(task);
1143 task_unlock(task);
[all...]

Completed in 85 milliseconds