Searched refs:lock (Results 1 - 25 of 2670) sorted by relevance


/drivers/gpu/drm/ttm/
ttm_lock.c
45 void ttm_lock_init(struct ttm_lock *lock) argument
47 spin_lock_init(&lock->lock);
48 init_waitqueue_head(&lock->queue);
49 lock->rw = 0;
50 lock->flags = 0;
51 lock->kill_takers = false;
52 lock->signal = SIGKILL;
56 void ttm_read_unlock(struct ttm_lock *lock) argument
58 spin_lock(&lock
65 __ttm_read_lock(struct ttm_lock *lock) argument
83 ttm_read_lock(struct ttm_lock *lock, bool interruptible) argument
96 __ttm_read_trylock(struct ttm_lock *lock, bool *locked) argument
120 ttm_read_trylock(struct ttm_lock *lock, bool interruptible) argument
139 ttm_write_unlock(struct ttm_lock *lock) argument
148 __ttm_write_lock(struct ttm_lock *lock) argument
169 ttm_write_lock(struct ttm_lock *lock, bool interruptible) argument
189 __ttm_vt_unlock(struct ttm_lock *lock) argument
206 struct ttm_lock *lock = container_of(base, struct ttm_lock, base); local
214 __ttm_vt_lock(struct ttm_lock *lock) argument
230 ttm_vt_lock(struct ttm_lock *lock, bool interruptible, struct ttm_object_file *tfile) argument
266 ttm_vt_unlock(struct ttm_lock *lock) argument
273 ttm_suspend_unlock(struct ttm_lock *lock) argument
282 __ttm_suspend_lock(struct ttm_lock *lock) argument
298 ttm_suspend_lock(struct ttm_lock *lock) argument
[all...]
/drivers/staging/lustre/lustre/ldlm/
l_lock.c
44 * Lock a lock and its resource.
47 * but there is a case when we change resource of lock upon
48 * enqueue reply. We rely on lock->l_resource = new_res
51 struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock) argument
53 /* on server-side resource of lock doesn't change */
54 if ((lock->l_flags & LDLM_FL_NS_SRV) == 0)
55 spin_lock(&lock->l_lock);
57 lock_res(lock->l_resource);
59 lock->l_flags |= LDLM_FL_RES_LOCKED;
60 return lock
67 unlock_res_and_lock(struct ldlm_lock *lock) argument
[all...]
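
A usage sketch, inferred from the excerpt above and not verified against the full file: lock_res_and_lock() takes lock->l_lock and then the resource lock, and unlock_res_and_lock() is assumed to release them in the reverse order, so callers bracket their updates like this:

        /* relies on the LDLM types; a sketch, not code from the listing */
        static void example_update_under_res_lock(struct ldlm_lock *lock)
        {
                lock_res_and_lock(lock);        /* also returns the locked resource */
                /* ... modify lock/resource state while both locks are held ... */
                unlock_res_and_lock(lock);
        }
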
ldlm_lock.c
49 /* lock types */
93 * Converts lock policy from local format to on the wire lock_desc format
107 * Converts lock policy from on the wire lock_desc format to local format
169 * Get a reference on a lock.
173 * - one for being a lock that's in-use
174 * - one for the addref associated with a new lock
176 struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock) argument
178 atomic_inc(&lock->l_refc);
179 return lock;
184 * Release lock referenc
188 ldlm_lock_put(struct ldlm_lock *lock) argument
226 ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock) argument
244 ldlm_lock_remove_from_lru(struct ldlm_lock *lock) argument
263 ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock) argument
281 ldlm_lock_add_to_lru(struct ldlm_lock *lock) argument
294 ldlm_lock_touch_in_lru(struct ldlm_lock *lock) argument
330 ldlm_lock_destroy_internal(struct ldlm_lock *lock) argument
377 ldlm_lock_destroy(struct ldlm_lock *lock) argument
395 ldlm_lock_destroy_nolock(struct ldlm_lock *lock) argument
408 lock_handle_addref(void *lock) argument
413 lock_handle_free(void *lock, int size) argument
434 struct ldlm_lock *lock; local
485 ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock, const struct ldlm_res_id *new_resid) argument
553 ldlm_lock2handle(const struct ldlm_lock *lock, struct lustre_handle *lockh) argument
568 struct ldlm_lock *lock; local
614 ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc) argument
630 ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new, struct list_head *work_list) argument
651 ldlm_add_cp_work_item(struct ldlm_lock *lock, struct list_head *work_list) argument
668 ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new, struct list_head *work_list) argument
685 struct ldlm_lock *lock; local
701 ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode) argument
727 struct ldlm_lock *lock; local
751 ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode) argument
765 ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode) argument
791 ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) argument
861 struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0); local
877 struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0); local
915 struct ldlm_lock *lock, *mode_end, *policy_end; local
991 ldlm_granted_list_add_lock(struct ldlm_lock *lock, struct sl_insert_point *prev) argument
1026 ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock) argument
1047 ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list) argument
1082 struct ldlm_lock *lock; local
1154 ldlm_lock_fail_match_locked(struct ldlm_lock *lock) argument
1163 ldlm_lock_fail_match(struct ldlm_lock *lock) argument
1178 ldlm_lock_allow_match_locked(struct ldlm_lock *lock) argument
1189 ldlm_lock_allow_match(struct ldlm_lock *lock) argument
1233 struct ldlm_lock *lock, *old_lock = NULL; local
1359 struct ldlm_lock *lock; local
1388 ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill, enum req_location loc, void *data, int size) argument
1497 struct ldlm_lock *lock; local
1560 struct ldlm_lock *lock = *lockp; local
1666 struct ldlm_lock *lock; local
1701 struct ldlm_lock *lock; local
1746 struct ldlm_lock *lock; local
1772 struct ldlm_lock *lock; local
1917 ldlm_cancel_callback(struct ldlm_lock *lock) argument
1950 ldlm_lock_cancel(struct ldlm_lock *lock) argument
1996 struct ldlm_lock *lock = ldlm_handle2lock(lockh); local
2025 struct ldlm_lock *lock = cfs_hash_object(hs, hnode); local
2076 ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode) argument
2103 ldlm_lock_convert(struct ldlm_lock *lock, int new_mode, __u32 *flags) argument
2193 struct ldlm_lock *lock; local
2212 _ldlm_lock_debug(struct ldlm_lock *lock, struct libcfs_debug_msg_data *msgdata, const char *fmt, ...) argument
[all...]
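
The comment excerpt above (lines 169-176) spells out the reference rules; below is a minimal usage sketch of the handle-to-lock-to-put pattern the rest of the listing relies on. ldlm_handle2lock() is assumed to be the convenience wrapper around the __ldlm_handle2lock() calls seen at lines 861, 877 and 1996, and to take a reference the caller must drop:

        static void example_with_lock_ref(const struct lustre_handle *lockh)
        {
                struct ldlm_lock *lock = ldlm_handle2lock(lockh);

                if (lock == NULL)
                        return;                 /* handle no longer names a live lock */

                /* ... use the lock; the reference pins it in memory ... */

                ldlm_lock_put(lock);            /* drop the reference taken by the lookup */
        }
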
ldlm_flock.c
42 * This file implements POSIX lock type for Lustre.
48 * merged into a single wider lock.
52 * NL to request a releasing of a portion of the lock
66 int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
81 ldlm_same_flock_owner(struct ldlm_lock *lock, struct ldlm_lock *new) argument
84 lock->l_policy_data.l_flock.owner) &&
85 (new->l_export == lock->l_export));
89 ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new) argument
92 lock->l_policy_data.l_flock.end) &&
94 lock
97 ldlm_flock_blocking_link(struct ldlm_lock *req, struct ldlm_lock *lock) argument
132 ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags) argument
178 struct ldlm_lock *lock = NULL; local
207 ldlm_flock_cancel_on_deadlock(struct ldlm_lock *lock, struct list_head *work_list) argument
253 struct ldlm_lock *lock = NULL; local
557 struct ldlm_lock *lock; local
581 ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data) argument
710 ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, void *data, int flag) argument
769 struct ldlm_lock *lock; local
790 struct ldlm_lock *lock; local
805 struct ldlm_lock *lock; local
[all...]
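
The body of ldlm_flocks_overlap() is cut off in the listing; the standard closed-interval overlap test it implements for byte ranges looks like the sketch below (plain start/end values instead of the l_policy_data fields, so this illustrates the test rather than reproducing the function):

        static inline bool flock_ranges_overlap(__u64 start1, __u64 end1,
                                                __u64 start2, __u64 end2)
        {
                return start1 <= end2 && start2 <= end1;
        }

Adjacent or overlapping ranges with the same owner and mode are what the "merged into a single wider lock" comment above refers to.
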
ldlm_request.c
40 * An AST is a callback issued on a lock when its state is changed. There are
41 * several different types of ASTs (callbacks) registered for each lock:
43 * - completion AST: when a lock is enqueued by some process, but cannot be
45 * the completion AST is sent to notify the caller when the lock is
48 * - blocking AST: when a lock is granted to some process, if another process
49 * enqueues a conflicting (blocking) lock on a resource, a blocking AST is
50 * sent to notify the holder(s) of the lock(s) of the conflicting lock
51 * request. The lock holder(s) must release their lock(
93 struct ldlm_lock *lock = lwd->lwd_lock; local
137 ldlm_get_enq_timeout(struct ldlm_lock *lock) argument
154 ldlm_completion_tail(struct ldlm_lock *lock) argument
181 ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data) argument
222 ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data) argument
309 ldlm_blocking_ast_nocheck(struct ldlm_lock *lock) argument
347 ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, void *data, int flag) argument
373 ldlm_glimpse_ast(struct ldlm_lock *lock, void *reqp) argument
411 struct ldlm_lock *lock; local
465 failed_lock_cleanup(struct ldlm_namespace *ns, struct ldlm_lock *lock, int mode) argument
519 struct ldlm_lock *lock; local
858 struct ldlm_lock *lock; local
983 ldlm_cli_convert_local(struct ldlm_lock *lock, int new_mode, __u32 *flags) argument
1015 struct ldlm_lock *lock; local
1091 ldlm_cli_cancel_local(struct ldlm_lock *lock) argument
1135 struct ldlm_lock *lock; local
1318 struct ldlm_lock *lock; local
1365 struct ldlm_lock *lock, *next; local
1412 ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock, int unused, int added, int count) argument
1447 ldlm_cancel_lrur_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock, int unused, int added, int count) argument
1486 ldlm_cancel_passed_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock, int unused, int added, int count) argument
1506 ldlm_cancel_aged_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock, int unused, int added, int count) argument
1527 ldlm_cancel_default_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock, int unused, int added, int count) argument
1601 struct ldlm_lock *lock, *next; local
1775 struct ldlm_lock *lock; local
1834 struct ldlm_lock *lock; local
1970 struct ldlm_lock *lock; local
2014 ldlm_iter_helper(struct ldlm_lock *lock, void *closure) argument
2076 ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure) argument
2100 struct ldlm_lock *lock; local
2150 replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock) argument
2264 struct ldlm_lock *lock, *next; local
[all...]
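
The AST description above maps onto callbacks with the signature shown for ldlm_blocking_ast() at line 347. Here is a sketch of the conventional shape of such a handler; the flag values LDLM_CB_BLOCKING and LDLM_CB_CANCELING are the usual LDLM ones and are an assumption of this sketch rather than something visible in the listing:

        static int example_blocking_ast(struct ldlm_lock *lock,
                                        struct ldlm_lock_desc *desc,
                                        void *data, int flag)
        {
                struct lustre_handle lockh;

                switch (flag) {
                case LDLM_CB_BLOCKING:
                        /* a conflicting enqueue arrived: arrange to release this lock */
                        ldlm_lock2handle(lock, &lockh);
                        /* the client would hand &lockh to its cancel path here */
                        return 0;
                case LDLM_CB_CANCELING:
                        /* the lock is going away: invalidate whatever it guarded */
                        return 0;
                default:
                        return -EINVAL;
                }
        }
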
ldlm_lockd.c
127 int ldlm_del_waiting_lock(struct ldlm_lock *lock) argument
132 int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout) argument
145 struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
149 LDLM_DEBUG(lock, "client blocking AST callback handler");
151 lock_res_and_lock(lock);
152 lock->l_flags |= LDLM_FL_CBPENDING;
154 if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
155 lock->l_flags |= LDLM_FL_CANCEL;
157 do_ast = (!lock->l_readers && !lock
144 ldlm_handle_bl_callback(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, struct ldlm_lock *lock) argument
180 ldlm_handle_cp_callback(struct ptlrpc_request *req, struct ldlm_namespace *ns, struct ldlm_request *dlm_req, struct ldlm_lock *lock) argument
330 ldlm_handle_gl_callback(struct ptlrpc_request *req, struct ldlm_namespace *ns, struct ldlm_request *dlm_req, struct ldlm_lock *lock) argument
405 init_blwi(struct ldlm_bl_work_item *blwi, struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, struct list_head *cancels, int count, struct ldlm_lock *lock, ldlm_cancel_flags_t cancel_flags) argument
440 ldlm_bl_to_thread(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, struct ldlm_lock *lock, struct list_head *cancels, int count, ldlm_cancel_flags_t cancel_flags) argument
471 ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, struct ldlm_lock *lock) argument
563 struct ldlm_lock *lock; local
913 struct ldlm_lock *lock; local
922 struct ldlm_lock *lock; local
943 struct ldlm_lock *lock; local
952 struct ldlm_lock *lock; local
[all...]
/drivers/acpi/acpica/
utlock.c
3 * Module Name: utlock - Reader/Writer lock interfaces
55 * PARAMETERS: lock - Pointer to a valid RW lock
59 * DESCRIPTION: Reader/writer lock creation and deletion interfaces.
62 acpi_status acpi_ut_create_rw_lock(struct acpi_rw_lock *lock) argument
66 lock->num_readers = 0;
67 status = acpi_os_create_mutex(&lock->reader_mutex);
72 status = acpi_os_create_mutex(&lock->writer_mutex);
76 void acpi_ut_delete_rw_lock(struct acpi_rw_lock *lock) argument
79 acpi_os_delete_mutex(lock
105 acpi_ut_acquire_read_lock(struct acpi_rw_lock *lock) argument
127 acpi_ut_release_read_lock(struct acpi_rw_lock *lock) argument
163 acpi_ut_acquire_write_lock(struct acpi_rw_lock *lock) argument
171 acpi_ut_release_write_lock(struct acpi_rw_lock *lock) argument
[all...]
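
acpi_ut_create_rw_lock() above builds a reader/writer lock out of a reader count and two mutexes. The sketch below illustrates that classic construction in generic kernel terms (it is not the ACPICA code): the first reader takes the writer-side lock and the last reader releases it. A semaphore stands in for the writer-side lock here because, unlike struct mutex, it may legally be released by a different task than the one that acquired it.

        #include <linux/mutex.h>
        #include <linux/semaphore.h>

        struct example_rw_lock {
                struct mutex reader_mutex;      /* serializes num_readers updates */
                struct semaphore writer_gate;   /* binary; held while readers or a writer run */
                unsigned int num_readers;
        };

        static void example_acquire_read(struct example_rw_lock *l)
        {
                mutex_lock(&l->reader_mutex);
                if (++l->num_readers == 1)
                        down(&l->writer_gate);          /* first reader blocks writers */
                mutex_unlock(&l->reader_mutex);
        }

        static void example_release_read(struct example_rw_lock *l)
        {
                mutex_lock(&l->reader_mutex);
                if (--l->num_readers == 0)
                        up(&l->writer_gate);            /* last reader admits writers */
                mutex_unlock(&l->reader_mutex);
        }

        static void example_acquire_write(struct example_rw_lock *l)
        {
                down(&l->writer_gate);                  /* excludes readers and other writers */
        }

        static void example_release_write(struct example_rw_lock *l)
        {
                up(&l->writer_gate);
        }
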
/drivers/staging/lustre/lustre/include/linux/
obd.h
61 spinlock_t lock; member in struct:__anon6535
69 static inline void __client_obd_list_lock(client_obd_lock_t *lock, argument
74 if (spin_trylock(&lock->lock)) {
75 LASSERT(lock->task == NULL);
76 lock->task = current;
77 lock->func = func;
78 lock->line = line;
79 lock->time = jiffies;
84 time_before(lock
109 client_obd_list_unlock(client_obd_lock_t *lock) argument
118 client_obd_list_lock_init(client_obd_lock_t *lock) argument
123 client_obd_list_lock_done(client_obd_lock_t *lock) argument
[all...]
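
__client_obd_list_lock() above spins on spin_trylock() so that it can record who holds the lock, from where and since when, and complain when the lock looks stuck. A self-contained sketch of that diagnostic pattern follows; the field names mirror the excerpt, while the 5-second threshold and the rate-limited warning are illustrative choices, not values from the listing:

        #include <linux/spinlock.h>
        #include <linux/sched.h>
        #include <linux/jiffies.h>
        #include <linux/printk.h>

        struct example_dbg_lock {
                spinlock_t lock;
                struct task_struct *task;       /* current holder, debugging only */
                const char *func;
                int line;
                unsigned long time;             /* jiffies when the holder took it */
        };

        static void example_dbg_lock_acquire(struct example_dbg_lock *l,
                                             const char *func, int line)
        {
                while (!spin_trylock(&l->lock)) {
                        if (l->task && time_after(jiffies, l->time + 5 * HZ))
                                pr_warn_ratelimited("lock held too long by %s:%d\n",
                                                    l->func ? l->func : "?", l->line);
                        cpu_relax();
                }
                l->task = current;
                l->func = func;
                l->line = line;
                l->time = jiffies;
        }
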
/drivers/staging/lustre/lustre/obdclass/
cl_lock.c
71 * Basic lock invariant that is maintained at all times. Caller either has a
72 * reference to \a lock, or somehow assures that \a lock cannot be freed.
77 const struct cl_lock *lock)
79 return ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
80 atomic_read(&lock->cll_ref) >= lock->cll_holds &&
81 lock->cll_holds >= lock
76 cl_lock_invariant_trusted(const struct lu_env *env, const struct cl_lock *lock) argument
92 cl_lock_invariant(const struct lu_env *env, const struct cl_lock *lock) argument
107 cl_lock_nesting(const struct cl_lock *lock) argument
115 cl_lock_counters(const struct lu_env *env, const struct cl_lock *lock) argument
127 cl_lock_trace0(int level, const struct lu_env *env, const char *prefix, const struct cl_lock *lock, const char *func, const int line) argument
149 cl_lock_lockdep_init(struct cl_lock *lock) argument
154 cl_lock_lockdep_acquire(const struct lu_env *env, struct cl_lock *lock, __u32 enqflags) argument
161 cl_lock_lockdep_release(const struct lu_env *env, struct cl_lock *lock) argument
170 cl_lock_lockdep_init(struct cl_lock *lock) argument
172 cl_lock_lockdep_acquire(const struct lu_env *env, struct cl_lock *lock, __u32 enqflags) argument
175 cl_lock_lockdep_release(const struct lu_env *env, struct cl_lock *lock) argument
190 cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice, struct cl_object *obj, const struct cl_lock_operations *ops) argument
249 cl_lock_free(const struct lu_env *env, struct cl_lock *lock) argument
284 cl_lock_put(const struct lu_env *env, struct cl_lock *lock) argument
313 cl_lock_get(struct cl_lock *lock) argument
331 cl_lock_get_trust(struct cl_lock *lock) argument
346 cl_lock_finish(const struct lu_env *env, struct cl_lock *lock) argument
360 struct cl_lock *lock; local
407 cl_lock_intransit(const struct lu_env *env, struct cl_lock *lock) argument
427 cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock, enum cl_lock_state state) argument
444 cl_lock_is_intransit(struct cl_lock *lock) argument
456 cl_lock_fits_into(const struct lu_env *env, const struct cl_lock *lock, const struct cl_lock_descr *need, const struct cl_io *io) argument
477 struct cl_lock *lock; local
519 struct cl_lock *lock; local
567 struct cl_lock *lock; local
616 cl_lock_at(const struct cl_lock *lock, const struct lu_device_type *dtype) argument
631 cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock) argument
652 cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock) argument
691 cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock) argument
718 cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock) argument
743 cl_lock_is_mutexed(struct cl_lock *lock) argument
770 cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock) argument
786 cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock) argument
838 cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock, int delta) argument
857 cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock, int delta) argument
872 cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock, const char *scope, const void *source) argument
924 cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock) argument
970 cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock, enum cl_lock_state state) argument
991 cl_lock_signal(const struct lu_env *env, struct cl_lock *lock) argument
1008 cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock, enum cl_lock_state state) argument
1029 cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock) argument
1062 cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic) argument
1120 cl_enqueue_kick(const struct lu_env *env, struct cl_lock *lock, struct cl_io *io, __u32 flags) argument
1153 cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock, struct cl_io *io, __u32 flags) argument
1210 cl_lock_enqueue_wait(const struct lu_env *env, struct cl_lock *lock, int keep_mutex) argument
1249 cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock, struct cl_io *io, __u32 enqflags) argument
1288 cl_enqueue(const struct lu_env *env, struct cl_lock *lock, struct cl_io *io, __u32 enqflags) argument
1317 cl_unuse_try(const struct lu_env *env, struct cl_lock *lock) argument
1383 cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock) argument
1395 cl_unuse(const struct lu_env *env, struct cl_lock *lock) argument
1414 cl_wait_try(const struct lu_env *env, struct cl_lock *lock) argument
1471 cl_wait(const struct lu_env *env, struct cl_lock *lock) argument
1506 cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock) argument
1538 cl_lock_modify(const struct lu_env *env, struct cl_lock *lock, const struct cl_lock_descr *desc) argument
1602 cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock, struct cl_lock_closure *closure) argument
1635 cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock, struct cl_lock_closure *closure) argument
1724 cl_lock_delete(const struct lu_env *env, struct cl_lock *lock) argument
1749 cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error) argument
1775 cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock) argument
1799 struct cl_lock *lock; local
1843 pgoff_at_lock(struct cl_page *page, struct cl_lock *lock) argument
1861 struct cl_lock *lock = cbdata; local
1897 struct cl_lock *lock = cbdata; local
1926 cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock) argument
1972 struct cl_lock *lock; local
2022 struct cl_lock *lock; local
2053 struct cl_lock *lock; local
2070 struct cl_lock *lock; local
2112 cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock, const char *scope, const void *source) argument
2130 cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock, const char *scope, const void *source) argument
2143 cl_lock_release(const struct lu_env *env, struct cl_lock *lock, const char *scope, const void *source) argument
2156 cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock) argument
2165 cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock) argument
2209 cl_lock_print(const struct lu_env *env, void *cookie, lu_printer_t printer, const struct cl_lock *lock) argument
[all...]
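
The "basic lock invariant" above is written with the libcfs ergo() helper (ergo(a, b) means "a implies b"). A minimal sketch of the check, covering only the two clauses that are fully visible in the listing (the third clause is truncated and deliberately not reconstructed here):

        #define ergo(a, b) (!(a) || (b))

        static int example_cl_lock_invariant(const struct cl_lock *lock)
        {
                return ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
                       atomic_read(&lock->cll_ref) >= lock->cll_holds;
        }
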
/drivers/gpu/drm/
drm_lock.c
54 * Add the current task to the lock wait queue, and attempt to take the lock.
60 struct drm_lock *lock = data; local
66 if (lock->context == DRM_KERNEL_CONTEXT) {
68 task_pid_nr(current), lock->context);
72 DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
73 lock->context, task_pid_nr(current),
74 master->lock.hw_lock->lock, lock
153 struct drm_lock *lock = data; local
184 volatile unsigned int *lock = &lock_data->hw_lock->lock; local
233 volatile unsigned int *lock = &lock_data->hw_lock->lock; local
258 volatile unsigned int *lock = &lock_data->hw_lock->lock; local
298 struct drm_hw_lock *lock = dev->sigdata.lock; local
351 volatile unsigned int *lock = &lock_data->hw_lock->lock; local
[all...]
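
The repeated "volatile unsigned int *lock = &lock_data->hw_lock->lock" lines above refer to the DRM hardware lock word, which packs a context number together with the _DRM_LOCK_HELD and _DRM_LOCK_CONT bits (the same macros appear in the via_video.c results below). A sketch of how such a word is typically claimed with a compare-and-swap loop; this illustrates the pattern and is not the verified body of drm_lock_take():

        static int example_lock_take(volatile unsigned int *lock, unsigned int context)
        {
                unsigned int old, new, prev;

                do {
                        old = *lock;
                        if (old & _DRM_LOCK_HELD)
                                new = old | _DRM_LOCK_CONT;     /* record contention */
                        else
                                new = context | _DRM_LOCK_HELD; /* claim the lock */
                        prev = cmpxchg(lock, old, new);
                } while (prev != old);

                return !(old & _DRM_LOCK_HELD); /* nonzero when this call acquired it */
        }
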
/drivers/gpu/drm/via/
via_video.c
40 XVMCLOCKPTR(dev_priv->sarea_priv, i)->lock = 0;
51 volatile int *lock; local
57 lock = (volatile int *)XVMCLOCKPTR(dev_priv->sarea_priv, i);
58 if ((_DRM_LOCKING_CONTEXT(*lock) == context)) {
59 if (_DRM_LOCK_IS_HELD(*lock)
60 && (*lock & _DRM_LOCK_CONT)) {
63 *lock = 0;
71 volatile int *lock; local
78 if (fx->lock >= VIA_NR_XVMC_LOCKS)
81 lock
[all...]
/drivers/hwspinlock/
hwspinlock_internal.h
29 * @trylock: make a single attempt to take the lock. returns 0 on
31 * @unlock: release the lock. always succeed. may _not_ sleep.
33 * core while spinning on a lock, between two successive
37 int (*trylock)(struct hwspinlock *lock);
38 void (*unlock)(struct hwspinlock *lock);
39 void (*relax)(struct hwspinlock *lock);
44 * @bank: the hwspinlock_device structure which owns this lock
45 * @lock: initialized and used by hwspinlock core
50 spinlock_t lock; member in struct:hwspinlock
58 * @base_id: id index of the first lock i
67 struct hwspinlock lock[0]; member in struct:hwspinlock_device
[all...]
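
The comments above document the three hwspinlock_ops callbacks a platform driver supplies. Below is an illustrative implementation for an imagined memory-mapped spinlock IP where reading the per-lock register returns 0 when the lock was free (and is thereby taken) and writing 0 releases it; the register semantics and the use of the per-lock priv pointer are assumptions of this sketch, not facts from the listing:

        #include <linux/io.h>
        #include <linux/delay.h>
        #include "hwspinlock_internal.h"        /* struct hwspinlock, hwspinlock_ops */

        static int example_hwspinlock_trylock(struct hwspinlock *lock)
        {
                void __iomem *reg = lock->priv; /* assumed per-lock register */

                return readl(reg) == 0;         /* nonzero: we now own the lock */
        }

        static void example_hwspinlock_unlock(struct hwspinlock *lock)
        {
                writel(0, (void __iomem *)lock->priv);  /* release */
        }

        static void example_hwspinlock_relax(struct hwspinlock *lock)
        {
                ndelay(50);                     /* back off between trylock attempts */
        }

        static const struct hwspinlock_ops example_hwspinlock_ops = {
                .trylock = example_hwspinlock_trylock,
                .unlock  = example_hwspinlock_unlock,
                .relax   = example_hwspinlock_relax,
        };
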
/drivers/staging/unisys/channels/
chanstub.h
19 void *pSignal, spinlock_t *lock);
21 void *pSignal, spinlock_t *lock);
chanstub.c
46 void *pSignal, spinlock_t *lock)
51 spin_lock_irqsave(lock, flags);
53 spin_unlock_irqrestore(lock, flags);
59 void *pSignal, spinlock_t *lock)
63 spin_lock(lock);
65 spin_unlock(lock);
45 SignalInsert_withLock(CHANNEL_HEADER __iomem *pChannel, u32 Queue, void *pSignal, spinlock_t *lock) argument
58 SignalRemove_withLock(CHANNEL_HEADER __iomem *pChannel, u32 Queue, void *pSignal, spinlock_t *lock) argument
/drivers/md/persistent-data/
dm-block-manager.c
28 * trace is also emitted for the previous lock acquisition.
38 spinlock_t lock; member in struct:block_lock
55 static unsigned __find_holder(struct block_lock *lock, argument
61 if (lock->holders[i] == task)
68 /* call this *after* you increment lock->count */
69 static void __add_holder(struct block_lock *lock, struct task_struct *task) argument
71 unsigned h = __find_holder(lock, NULL);
77 lock->holders[h] = task;
80 t = lock->traces + h;
83 t->entries = lock
90 __del_holder(struct block_lock *lock, struct task_struct *task) argument
97 __check_holder(struct block_lock *lock) argument
155 __wake_many(struct block_lock *lock) argument
180 bl_init(struct block_lock *lock) argument
191 __available_for_read(struct block_lock *lock) argument
198 bl_down_read(struct block_lock *lock) argument
229 bl_down_read_nonblock(struct block_lock *lock) argument
250 bl_up_read(struct block_lock *lock) argument
261 bl_down_write(struct block_lock *lock) argument
297 bl_up_write(struct block_lock *lock) argument
342 struct block_lock lock; member in struct:buffer_aux
[all...]
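
__find_holder()/__add_holder() above track which tasks currently hold a block_lock so that a meaningful report can be produced on misuse. A simplified sketch of that bookkeeping (the EX_MAX_HOLDERS value and the omission of the stack-trace capture are choices of this sketch):

        #include <linux/spinlock.h>
        #include <linux/sched.h>

        #define EX_MAX_HOLDERS 4

        struct example_block_lock {
                spinlock_t lock;
                int count;                      /* <0: write locked, >0: reader count */
                struct task_struct *holders[EX_MAX_HOLDERS];
        };

        /* returns the slot holding 'task', or the first free slot when task == NULL;
         * EX_MAX_HOLDERS means "not found" */
        static unsigned example_find_holder(struct example_block_lock *lock,
                                            struct task_struct *task)
        {
                unsigned i;

                for (i = 0; i < EX_MAX_HOLDERS; i++)
                        if (lock->holders[i] == task)
                                break;
                return i;
        }

        /* call this after adjusting lock->count, with lock->lock held */
        static void example_add_holder(struct example_block_lock *lock,
                                       struct task_struct *task)
        {
                unsigned h = example_find_holder(lock, NULL);

                if (h < EX_MAX_HOLDERS)
                        lock->holders[h] = task;
        }
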
/drivers/clk/mmp/
clk-apbc.c
33 spinlock_t *lock; member in struct:clk_apbc
46 if (apbc->lock)
47 spin_lock_irqsave(apbc->lock, flags);
55 if (apbc->lock)
56 spin_unlock_irqrestore(apbc->lock, flags);
60 if (apbc->lock)
61 spin_lock_irqsave(apbc->lock, flags);
67 if (apbc->lock)
68 spin_unlock_irqrestore(apbc->lock, flags);
73 if (apbc->lock)
123 mmp_clk_register_apbc(const char *name, const char *parent_name, void __iomem *base, unsigned int delay, unsigned int apbc_flags, spinlock_t *lock) argument
[all...]
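
The enable/disable paths above (and the very similar ones in clk-apmu.c and berlin2-div.c further down) show the "optional spinlock" idiom used when a clock register may or may not be shared: the read-modify-write is wrapped in spin_lock_irqsave() only if a lock was supplied at registration time. A generic sketch of that idiom (the struct name, register layout and enable bit are illustrative):

        #include <linux/io.h>
        #include <linux/spinlock.h>
        #include <linux/bitops.h>

        struct example_clk_gate {
                void __iomem *reg;
                spinlock_t *lock;       /* NULL when the register is not shared */
        };

        static void example_clk_gate_enable(struct example_clk_gate *gate)
        {
                unsigned long flags = 0;
                u32 val;

                if (gate->lock)
                        spin_lock_irqsave(gate->lock, flags);

                val = readl(gate->reg);
                val |= BIT(0);          /* illustrative enable bit */
                writel(val, gate->reg);

                if (gate->lock)
                        spin_unlock_irqrestore(gate->lock, flags);
        }
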
clk-apmu.c
27 spinlock_t *lock; member in struct:clk_apmu
36 if (apmu->lock)
37 spin_lock_irqsave(apmu->lock, flags);
42 if (apmu->lock)
43 spin_unlock_irqrestore(apmu->lock, flags);
54 if (apmu->lock)
55 spin_lock_irqsave(apmu->lock, flags);
60 if (apmu->lock)
61 spin_unlock_irqrestore(apmu->lock, flags);
70 void __iomem *base, u32 enable_mask, spinlock_t *lock)
69 mmp_clk_register_apmu(const char *name, const char *parent_name, void __iomem *base, u32 enable_mask, spinlock_t *lock) argument
[all...]
/drivers/thermal/
user_space.c
37 mutex_lock(&tz->lock);
39 mutex_unlock(&tz->lock);
/drivers/clk/berlin/
berlin2-div.c
68 spinlock_t *lock; member in struct:berlin2_div
81 if (div->lock)
82 spin_lock(div->lock);
87 if (div->lock)
88 spin_unlock(div->lock);
99 if (div->lock)
100 spin_lock(div->lock);
106 if (div->lock)
107 spin_unlock(div->lock);
118 if (div->lock)
238 berlin2_div_register(const struct berlin2_div_map *map, void __iomem *base, const char *name, u8 div_flags, const char **parent_names, int num_parents, unsigned long flags, spinlock_t *lock) argument
[all...]
/drivers/base/
map.c
26 int (*lock)(dev_t, void *); member in struct:kobj_map::probe
29 struct mutex *lock; member in struct:kobj_map
34 int (*lock)(dev_t, void *), void *data)
52 p->lock = lock;
57 mutex_lock(domain->lock);
65 mutex_unlock(domain->lock);
79 mutex_lock(domain->lock);
92 mutex_unlock(domain->lock);
103 mutex_lock(domain->lock);
32 kobj_map(struct kobj_map *domain, dev_t dev, unsigned long range, struct module *module, kobj_probe_t *probe, int (*lock)(dev_t, void *), void *data) argument
136 kobj_map_init(kobj_probe_t *base_probe, struct mutex *lock) argument
[all...]
/drivers/gpu/drm/nouveau/
nv10_fence.h
16 spinlock_t lock; member in struct:nv10_fence_priv
/drivers/tty/serial/8250/
8250_fsl.c
33 spin_lock_irqsave(&up->port.lock, flags);
37 spin_unlock_irqrestore(&up->port.lock, flags);
45 spin_unlock_irqrestore(&up->port.lock, flags);
60 spin_unlock_irqrestore(&up->port.lock, flags);
/drivers/usb/gadget/function/
u_ecm.h
32 struct mutex lock; member in struct:f_ecm_opts
u_eem.h
32 struct mutex lock; member in struct:f_eem_opts
u_ether_configfs.h
42 mutex_lock(&opts->lock); \
44 mutex_unlock(&opts->lock); \
54 mutex_lock(&opts->lock); \
56 mutex_unlock(&opts->lock); \
61 mutex_unlock(&opts->lock); \
78 mutex_lock(&opts->lock); \
80 mutex_unlock(&opts->lock); \
90 mutex_lock(&opts->lock); \
92 mutex_unlock(&opts->lock); \
97 mutex_unlock(&opts->lock); \
[all...]

Completed in 5208 milliseconds
