Lines Matching defs:self

38 ulwp_t *self = curthread;
39 uberdata_t *udp = self->ul_uberdata;
51 } else if (ulwp == self) {
53 * Unlock self before cancelling.
55 ulwp_unlock(self, udp);
56 self->ul_nocancel = 0; /* cancellation is now possible */
57 if (self->ul_sigdefer == 0)
60 self->ul_cancel_pending = 1;
61 set_cancel_pending_flag(self, 0);
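
These matches (source lines 38-61) evidently fall in pthread_cancel(): when a thread targets itself (ulwp == self), libc drops the ulwp lock before acting, re-enables cancellation (ul_nocancel = 0), and then either cancels on the spot or, if signals are deferred, records ul_cancel_pending and mirrors it via set_cancel_pending_flag(). A minimal sketch of the caller-visible behavior, using only standard POSIX calls:

	#include <pthread.h>
	#include <stdio.h>

	static void *
	worker(void *arg)
	{
		(void) arg;
		(void) pthread_cancel(pthread_self());	/* the ulwp == self case */
		/*
		 * Deferred cancellation: the cancel is only pending here;
		 * the thread keeps running until a cancellation point.
		 */
		pthread_testcancel();		/* acts on the pending cancel */
		return (NULL);			/* never reached */
	}

	int
	main(void)
	{
		pthread_t t;
		void *status;

		(void) pthread_create(&t, NULL, worker, NULL);
		(void) pthread_join(t, &status);
		(void) printf("canceled: %d\n", status == PTHREAD_CANCELED);
		return (0);
	}
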
94 ulwp_t *self = curthread;
95 uberdata_t *udp = self->ul_uberdata;
99 * Grab ulwp_lock(self) to protect the setting of ul_cancel_disabled
102 * defers SIGCANCEL until ulwp_unlock(self) when exit_critical()
103 * is called. (self->ul_cancel_pending is set in the SIGCANCEL
106 ulwp_lock(self, udp);
108 was_disabled = self->ul_cancel_disabled;
111 self->ul_cancel_disabled = 0;
114 self->ul_cancel_disabled = 1;
117 ulwp_unlock(self, udp);
120 set_cancel_pending_flag(self, 0);
126 if ((!self->ul_cancel_disabled || !was_disabled) &&
127 self->ul_cancel_async && self->ul_cancel_pending) {
128 ulwp_unlock(self, udp);
132 ulwp_unlock(self, udp);
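
Source lines 94-132 are pthread_setcancelstate(). The locking is the point: ul_cancel_disabled is tested under ulwp_lock(self) by pthread_cancel() above, and taking that lock also enters a critical section, deferring SIGCANCEL until ulwp_unlock(). If re-enabling exposes a cancel that is both pending and asynchronous, the thread exits immediately. At the application level this supports the usual disable/restore bracket (standard POSIX only):

	#include <pthread.h>

	void
	update_shared_state(void)
	{
		int oldstate;

		(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
		/* ... work that must not be abandoned half-way ... */
		(void) pthread_setcancelstate(oldstate, NULL);
		/*
		 * A cancel that arrived while we were disabled is acted on
		 * at the next cancellation point after the old state is
		 * restored (or immediately, if the type is asynchronous).
		 */
	}
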
151 ulwp_t *self = curthread;
159 enter_critical(self);
161 was_async = self->ul_cancel_async;
164 self->ul_cancel_async = 1;
167 self->ul_cancel_async = 0;
170 exit_critical(self);
173 self->ul_save_async = self->ul_cancel_async;
179 if ((self->ul_cancel_async || was_async) &&
180 self->ul_cancel_pending && !self->ul_cancel_disabled) {
181 exit_critical(self);
185 exit_critical(self);
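
Lines 151-185 are pthread_setcanceltype(): ul_cancel_async is flipped inside enter_critical()/exit_critical() so the SIGCANCEL handler never observes a half-made change, ul_save_async remembers the chosen type, and switching to asynchronous with a cancel already pending (and not disabled) exits on the spot. Since asynchronous cancellation is only safe around async-cancel-safe code, the common pattern confines it to a tight compute loop:

	#include <pthread.h>

	void *
	crunch(void *arg)
	{
		volatile unsigned long n = 0;
		int oldtype;

		(void) pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS,
		    &oldtype);
		while (n < 4000000000UL)
			n++;	/* cancellable without any cancellation points */
		(void) pthread_setcanceltype(oldtype, NULL);
		return (arg);
	}
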
205 ulwp_t *self = curthread;
207 if (self->ul_cancel_pending && !self->ul_cancel_disabled)
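
Lines 205-207 show pthread_testcancel() reduced to its essence: act only if a cancel is pending and not disabled. It is how CPU-bound code that makes no blocking calls opts into cancellation:

	#include <pthread.h>

	void *
	long_job(void *arg)
	{
		int i;

		for (i = 0; i < 1000000; i++) {
			pthread_testcancel();	/* explicit cancellation point */
			/* ... one unit of work ... */
		}
		return (arg);
	}
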
219 ulwp_t *self = curthread;
221 ASSERT(!(self->ul_cancelable && self->ul_cancel_disabled));
222 if (!self->ul_cancel_disabled) {
223 ASSERT(self->ul_cancelable >= 0);
224 self->ul_cancelable++;
225 if (self->ul_cancel_pending)
237 ulwp_t *self = curthread;
239 ASSERT(!(self->ul_cancelable && self->ul_cancel_disabled));
240 if (!self->ul_cancel_disabled) {
241 if (self->ul_cancel_pending)
243 self->ul_cancelable--;
244 ASSERT(self->ul_cancelable >= 0);
255 ulwp_t *self = curthread;
257 ASSERT(!(self->ul_cancelable && self->ul_cancel_disabled));
258 if (!self->ul_cancel_disabled) {
259 self->ul_cancelable--;
260 ASSERT(self->ul_cancelable >= 0);
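
Lines 219-260 are the internal bracket _cancelon()/_canceloff(): ul_cancelable counts how deeply the thread is nested inside cancellation points, the ASSERTs enforce that the count never goes negative and is never raised while cancellation is disabled, and _canceloff_nocancel() decrements without acting on a pending cancel, for paths where the blocking operation already succeeded. A schematic of how a blocking primitive might use the bracket; blocking_syscall() is a placeholder, not a real illumos function:

	#include <sys/types.h>

	extern void _cancelon(void);
	extern void _canceloff(void);
	extern void _canceloff_nocancel(void);
	extern ssize_t blocking_syscall(int, void *, size_t);	/* hypothetical */

	ssize_t
	cancellable_read(int fd, void *buf, size_t len)
	{
		ssize_t rv;

		_cancelon();			/* now a cancellation point */
		rv = blocking_syscall(fd, buf, len);
		if (rv >= 0)
			_canceloff_nocancel();	/* success: keep the result */
		else
			_canceloff();		/* may act on a pending cancel */
		return (rv);
	}
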
274 ulwp_t *self = curthread;
280 infop->next = self->ul_clnup_hdr;
281 self->ul_clnup_hdr = infop;
293 ulwp_t *self = curthread;
294 __cleanup_t *infop = self->ul_clnup_hdr;
296 self->ul_clnup_hdr = infop->next;
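
Lines 274-296 are __cleanup_push() and __cleanup_pop(): cleanup records form a singly linked list headed at self->ul_clnup_hdr, pushed and popped in LIFO order. Their public face is the pthread_cleanup_push()/pthread_cleanup_pop() pair:

	#include <pthread.h>
	#include <stdlib.h>

	static void
	free_buffer(void *arg)
	{
		free(arg);
	}

	void *
	worker(void *arg)
	{
		char *buf = malloc(4096);

		pthread_cleanup_push(free_buffer, buf);
		/* ... blocking work; cancellation here runs free_buffer(buf) ... */
		pthread_cleanup_pop(1);		/* pop and run the handler */
		return (arg);
	}
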
302 * Called when either self->ul_cancel_disabled or self->ul_cancel_pending
312 set_cancel_pending_flag(ulwp_t *self, int clear_flags)
316 if (self->ul_vfork | self->ul_nocancel)
318 enter_critical(self);
319 if ((scp = self->ul_schedctl) != NULL ||
323 else if (self->ul_cancel_pending && !self->ul_cancel_disabled)
328 exit_critical(self);
338 set_cancel_eintr_flag(ulwp_t *self)
342 if (self->ul_vfork | self->ul_nocancel)
344 enter_critical(self);
345 if ((scp = self->ul_schedctl) != NULL ||
348 exit_critical(self);
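
Lines 302-348 are the two flag-mirroring helpers, set_cancel_pending_flag() and set_cancel_eintr_flag(). Both follow the same idiom: bail out early for vfork'd or never-cancellable threads (ul_vfork | ul_nocancel), then, inside enter_critical()/exit_critical(), copy the thread's cancellation state into the schedctl page shared with the kernel, creating that page on first use. A schematic of the idiom with hypothetical names (sc_page_t, SC_PENDING, enter_crit() and exit_crit() are illustrative, not the real schedctl or libc definitions):

	typedef struct {
		volatile unsigned int sc_flags;	/* readable by the kernel */
	} sc_page_t;

	#define	SC_PENDING	0x01

	extern void enter_crit(void);	/* hypothetical: defer SIGCANCEL */
	extern void exit_crit(void);

	void
	mirror_pending(sc_page_t *scp, int pending, int disabled)
	{
		enter_crit();	/* keep the signal handler from racing us */
		if (pending && !disabled)
			scp->sc_flags |= SC_PENDING;
		else
			scp->sc_flags &= ~SC_PENDING;
		exit_crit();
	}
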
360 set_parking_flag(ulwp_t *self, int park)
364 enter_critical(self);
365 if ((scp = self->ul_schedctl) != NULL ||
380 if (self->ul_cancel_pending &&
381 !self->ul_cancel_disabled)
385 (void) __lwp_unpark(self->ul_lwpid);
387 exit_critical(self);
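
Lines 360-387 are set_parking_flag(), the bracket around __lwp_park(): the thread publishes that it is (or no longer is) parked and, on the way out, if a cancel became pending while it slept and is not disabled, unparks itself with __lwp_unpark(self->ul_lwpid) so the wakeup cannot be lost. The same publish-then-recheck shape, reduced to hypothetical primitives (park()/unpark() stand in for __lwp_park()/__lwp_unpark(), and park() is assumed to consume a pending unpark, as the real call does):

	#include <stdatomic.h>

	extern void park(void);		/* hypothetical: block until unparked */
	extern void unpark(long tid);	/* hypothetical: wake a parked thread */

	typedef struct {
		atomic_int go;		/* the wake condition */
		atomic_int parked;	/* mirrors an SC_PARK-style flag */
		long tid;
	} waiter_t;

	void
	waiter_sleep(waiter_t *w)
	{
		atomic_store(&w->parked, 1);	/* publish before blocking */
		while (!atomic_load(&w->go))	/* recheck: no lost wakeups */
			park();
		atomic_store(&w->parked, 0);
	}

	void
	waiter_wake(waiter_t *w)
	{
		atomic_store(&w->go, 1);
		if (atomic_exchange(&w->parked, 0))
			unpark(w->tid);
	}
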
396 ulwp_t *self = curthread;
406 enter_critical(self);
408 (((scp = self->ul_schedctl) != NULL ||
412 exit_critical(self);
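
The final matches, lines 396-412, have the shape of cancel_active() (or a close relative): answer "is this thread due to be cancelled?" by reading the schedctl flags inside enter_critical()/exit_critical(), so the SIGCANCEL handler cannot fire mid-read. Schematically, reusing the hypothetical names from the sketch above:

	int
	about_to_exit(sc_page_t *scp)
	{
		int soon;

		enter_crit();
		soon = (scp != NULL && (scp->sc_flags & SC_PENDING) != 0);
		exit_crit();
		return (soon);
	}
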