diff --git a/driver/cyclic.c b/driver/cyclic.c index a2c717a..7af6dec 100644 --- a/driver/cyclic.c +++ b/driver/cyclic.c @@ -2863,7 +2863,7 @@ cyclic_mp_init() { cpu_t *c; - dmutex_enter(&cpu_lock); + mutex_enter(&cpu_lock); c = cpu_list; do { @@ -2874,7 +2874,7 @@ cyclic_mp_init() } while ((c = c->cpu_next) != cpu_list); register_cpu_setup_func((cpu_setup_func_t *)cyclic_cpu_setup, NULL); - dmutex_exit(&cpu_lock); + mutex_exit(&cpu_lock); } /* diff --git a/driver/dtrace.c b/driver/dtrace.c index f81a0fc..1071891 100644 --- a/driver/dtrace.c +++ b/driver/dtrace.c @@ -211,7 +211,6 @@ static struct notifier_block n_module_load = { /**********************************************************************/ # define FAST_PROBE_TEARDOWN 0 static unsigned long long cnt_free1; -static volatile int lock_teardown = -1; # endif /* @@ -298,9 +297,9 @@ static int dtrace_dynvar_failclean; /* dynvars failed to clean */ * mod_lock is similar with respect to dtrace_provider_lock in that it must be * acquired _between_ dtrace_provider_lock and dtrace_lock. */ -MUTEX_DEFINE(dtrace_lock); /* probe state lock */ -MUTEX_DEFINE(dtrace_provider_lock); /* provider state lock */ -MUTEX_DEFINE(dtrace_meta_lock); /* meta-provider state lock */ +DEFINE_MUTEX(dtrace_lock); /* probe state lock */ +DEFINE_MUTEX(dtrace_provider_lock); /* provider state lock */ +DEFINE_MUTEX(dtrace_meta_lock); /* meta-provider state lock */ /* * DTrace Provider Variables @@ -369,7 +368,7 @@ int dtrace_helptrace_enabled = 0; static dtrace_errhash_t dtrace_errhash[DTRACE_ERRHASHSZ]; static const char *dtrace_errlast; static kthread_t *dtrace_errthread; -static MUTEX_DEFINE(dtrace_errlock); +static DEFINE_MUTEX(dtrace_errlock); #endif /* @@ -6223,157 +6222,6 @@ dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, TODO_END(); } -#if linux -/**********************************************************************/ -/* The following is a locking wrapper around dtrace_probe. We need */ -/* to be careful of the CPU invoking dtrace_probe() from an */ -/* interrupt routine, whilst processing a probe. It looks like */ -/* Apple does the same trick, with a comment in the Darwin source */ -/* to the effect that Solaris can guarantee no reentrancy. */ -/* */ -/* We allow error probes through since this is naturally */ -/* recursive. What we have detected is a timer interrupt invoking */ -/* dtrace, so this is definitely a no-no: we could deadlock on a */ -/* mutex and not recover. */ -/* */ -/* Looks like we need to disable interrupts when doing syscall */ -/* tracing. (FBT tracing should already have interrupts disabled). */ -/* */ -/* In any case, FBT tracing could be reentrant if we are not clean */ -/* in avoiding external kernel dependency in any of the */ -/* subroutines called from here. The interrupt routines attempt to */ -/* detect this, but obviously this isnt strong enough (see */ -/* dtrace_linux.c), since we can crash the kernel without this */ -/* protective wrapper. */ -/* */ -/* Note that we do simple "locked" assignments, rather than atomic */ -/* inc/dec. The probability of a clashing interrupt is very low - */ -/* we and we must do none-blocking checks. 
*/ -/**********************************************************************/ -unsigned long cnt_probe_recursion; -unsigned long long cnt_probe_noint; -unsigned long long cnt_probe_safe; -int dtrace_safe; - -void -dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, - uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) -{ - void __dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, - uintptr_t arg2, uintptr_t arg3, uintptr_t arg4); - int cpu; - cpu_core_t *cpup; - - /***********************************************/ - /* Allow us to disable dtrace as soon as we */ - /* come across a consistency error, so we */ - /* can look at the latest trace info. */ - /***********************************************/ - if (dtrace_shutdown) - return; - - /***********************************************/ - /* If interrupts are disabled and we are in */ - /* safe-mode, then we could deadlock, e.g. */ - /* for Xen or where we have to use */ - /* smp_call_function() on a dtrace_xcall(). */ - /* So, default to safe. */ - /* At the moment, this isnt firing which is */ - /* strange. Maybe the cpuc_regs isnt set up */ - /* (should be, since its in */ - /* dtrace_int[13]_handler. */ - /***********************************************/ - cpu = cpu_get_id(); - - /***********************************************/ - /* cpuc_regs may be null until we get the */ - /* first probe. Silly me. */ - /***********************************************/ - cpup = cpu_get_this(); - if (cpup && cpup->cpuc_regs && cpup->cpuc_regs->r_rfl & X86_EFLAGS_IF) { - if (dtrace_safe) { - cnt_probe_safe++; - return; - } - cnt_probe_noint++; - } - - /***********************************************/ - /* If someone else is tearing down, and its */ - /* us, then just drop the probe. */ - /***********************************************/ - if (lock_teardown >= 0 && lock_teardown == smp_processor_id()) { - //dtrace_printf("->%s\n", dtrace_probes[id-1]->dtpr_func); - return; - } - - /***********************************************/ - /* Is some other cpu tearing down? Try and */ - /* slow down a bit so we arent swamping */ - /* with new probes and causing lots of */ - /* xcall delays. */ - /***********************************************/ - if (lock_teardown >= 0) { - unsigned long cnt = 0; - //dtrace_printf("->%s\n", dtrace_probes[id-1]->dtpr_func); - /***********************************************/ - /* Slow down a bit, and keep seeing if we */ - /* can drain any pending xcalls. */ - /***********************************************/ - while (lock_teardown >= 0) { - extern void xcall_slave2(void); - xcall_slave2(); - if (cnt++ >= smp_processor_id() * 10/*000*/) { - break; - } - } - } - - if (id == dtrace_probeid_error) { - __dtrace_probe(id, arg0, arg1, arg2, arg3, arg4); - return; - } - - /***********************************************/ - /* If we ever get a reentrant lock, we are */ - /* a bit hosed - because this should never */ - /* happen, and if it did, it potentially */ - /* means dtrace_probe() never returned (eg */ - /* a page fault type trap). Given this */ - /* scenario, then we can never call it */ - /* again. Ideally we should reset the lock */ - /* occasionally. (Losing the lock means no */ - /* more dtrace_probes will fire). */ - /* */ - /* Note that the timer interrupt will */ - /* occasionally get a look in, but we dont */ - /* mind dropping the odd timer tick. 
*/ - /***********************************************/ - if (cpu_core[cpu].cpuc_probe_level) { - /***********************************************/ - /* Avoid flooding the console or syslogd. */ - /***********************************************/ - cnt_probe_recursion++; - if (1) { - if (cnt_probe_recursion < 10) { - dtrace_printf("dtrace_probe: re-entrancy: old=%d this=%d [#%lu]\n", - (int) cpu_core[cpu].cpuc_this_probe, - (int) id, - cnt_probe_recursion); -// dump_stack(); - } - return; - } - } -//asm("cli\n"); - cpu_core[cpu].cpuc_probe_level++; - cpu_core[cpu].cpuc_this_probe = id; - __dtrace_probe(id, arg0, arg1, arg2, arg3, arg4); - cpu_core[cpu].cpuc_probe_level--; - -} -#define dtrace_probe __dtrace_probe -#endif /* * If you're looking for the epicenter of DTrace, you just found it. This * is the function called by the provider to fire a probe -- from which all @@ -7322,7 +7170,7 @@ dtrace_errdebug(const char *str) int hval = dtrace_hash_str((char *)str) % DTRACE_ERRHASHSZ; int occupied = 0; - dmutex_enter(&dtrace_errlock); + mutex_enter(&dtrace_errlock); dtrace_errlast = str; dtrace_errthread = curthread; @@ -7344,7 +7192,7 @@ dtrace_errdebug(const char *str) panic("dtrace: undersized error hash"); out: - dmutex_exit(&dtrace_errlock); + mutex_exit(&dtrace_errlock); } #endif @@ -7909,9 +7757,9 @@ HERE(); RETURN(EBUSY); } } else { - dmutex_enter(&dtrace_provider_lock); - dmutex_enter(&mod_lock); - dmutex_enter(&dtrace_lock); + mutex_enter(&dtrace_provider_lock); + mutex_enter(&mod_lock); + mutex_enter(&dtrace_lock); } /* @@ -7923,9 +7771,9 @@ HERE(); (dtrace_opens || (dtrace_anon.dta_state != NULL && dtrace_anon.dta_state->dts_necbs > 0))) { if (!self) { - dmutex_exit(&dtrace_lock); - dmutex_exit(&mod_lock); - dmutex_exit(&dtrace_provider_lock); + mutex_exit(&dtrace_lock); + mutex_exit(&mod_lock); + mutex_exit(&dtrace_provider_lock); } RETURN(EBUSY); } @@ -7957,9 +7805,9 @@ HERE(); noreap = 1; if (!self) { - dmutex_exit(&dtrace_lock); - dmutex_exit(&mod_lock); - dmutex_exit(&dtrace_provider_lock); + mutex_exit(&dtrace_lock); + mutex_exit(&mod_lock); + mutex_exit(&dtrace_provider_lock); } if (noreap) RETURN(EBUSY); @@ -8032,9 +7880,9 @@ HERE(); } if (!self) { - dmutex_exit(&dtrace_lock); - dmutex_exit(&mod_lock); - dmutex_exit(&dtrace_provider_lock); + mutex_exit(&dtrace_lock); + mutex_exit(&mod_lock); + mutex_exit(&dtrace_provider_lock); } kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); @@ -8055,13 +7903,13 @@ dtrace_invalidate(dtrace_provider_id_t id) ASSERT(pvp->dtpv_pops.dtps_enable != (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop); - dmutex_enter(&dtrace_provider_lock); - dmutex_enter(&dtrace_lock); + mutex_enter(&dtrace_provider_lock); + mutex_enter(&dtrace_lock); pvp->dtpv_defunct = dtrace_gethrtime(); - dmutex_exit(&dtrace_lock); - dmutex_exit(&dtrace_provider_lock); + mutex_exit(&dtrace_lock); + mutex_exit(&dtrace_provider_lock); } /* @@ -8096,8 +7944,8 @@ dtrace_condense(dtrace_provider_id_t id) ASSERT(prov->dtpv_pops.dtps_enable != (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop); - dmutex_enter(&dtrace_provider_lock); - dmutex_enter(&dtrace_lock); + mutex_enter(&dtrace_provider_lock); + mutex_enter(&dtrace_lock); /* * Attempt to destroy the probes associated with this provider. 
@@ -8127,8 +7975,8 @@ dtrace_condense(dtrace_provider_id_t id) vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); } - dmutex_exit(&dtrace_lock); - dmutex_exit(&dtrace_provider_lock); + mutex_exit(&dtrace_lock); + mutex_exit(&dtrace_provider_lock); return (0); } @@ -8273,10 +8121,10 @@ dtrace_probe_lookup(dtrace_provider_id_t prid, const char *mod, pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul; pkey.dtpk_id = DTRACE_IDNONE; - dmutex_enter(&dtrace_lock); + mutex_enter(&dtrace_lock); match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0, dtrace_probe_lookup_match, &id); - dmutex_exit(&dtrace_lock); + mutex_exit(&dtrace_lock); ASSERT(match == 1 || match == 0); return (match ? id : 0); @@ -8291,13 +8139,13 @@ dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid) dtrace_probe_t *probe; void *rval = NULL; - dmutex_enter(&dtrace_lock); + mutex_enter(&dtrace_lock); if ((probe = dtrace_probe_lookup_id(pid)) != NULL && probe->dtpr_provider == (dtrace_provider_t *)id) rval = probe->dtpr_arg; - dmutex_exit(&dtrace_lock); + mutex_exit(&dtrace_lock); return (rval); } @@ -8369,7 +8217,7 @@ dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) * that this also prevents the mod_busy bits from changing. * (mod_busy can only be changed with mod_lock held.) */ - dmutex_enter(&mod_lock); + mutex_enter(&mod_lock); # if defined(sun) {struct modctl *ctl = &modules; @@ -8404,7 +8252,7 @@ dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) } } # endif - dmutex_exit(&mod_lock); + mutex_exit(&mod_lock); } while (all && (prv = prv->dtpv_next) != NULL); //HERE(); } @@ -8715,12 +8563,12 @@ dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, (void) strcpy(meta->dtm_name, name); meta->dtm_arg = arg; - dmutex_enter(&dtrace_meta_lock); - dmutex_enter(&dtrace_lock); + mutex_enter(&dtrace_meta_lock); + mutex_enter(&dtrace_lock); if (dtrace_meta_pid != NULL) { - dmutex_exit(&dtrace_lock); - dmutex_exit(&dtrace_meta_lock); + mutex_exit(&dtrace_lock); + mutex_exit(&dtrace_meta_lock); cmn_err(CE_WARN, "failed to register meta-register %s: " "user-land meta-provider exists", name); kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1); @@ -8739,7 +8587,7 @@ dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, help = dtrace_deferred_pid; dtrace_deferred_pid = NULL; - dmutex_exit(&dtrace_lock); + mutex_exit(&dtrace_lock); while (help != NULL) { for (i = 0; i < help->dthps_nprovs; i++) { @@ -8754,7 +8602,7 @@ dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, help = next; } - dmutex_exit(&dtrace_meta_lock); + mutex_exit(&dtrace_meta_lock); return (0); } @@ -8764,8 +8612,8 @@ dtrace_meta_unregister(dtrace_meta_provider_id_t id) { dtrace_meta_t **pp, *old = (dtrace_meta_t *)id; - dmutex_enter(&dtrace_meta_lock); - dmutex_enter(&dtrace_lock); + mutex_enter(&dtrace_meta_lock); + mutex_enter(&dtrace_lock); if (old == dtrace_meta_pid) { pp = &dtrace_meta_pid; @@ -8775,15 +8623,15 @@ dtrace_meta_unregister(dtrace_meta_provider_id_t id) } if (old->dtm_count != 0) { - dmutex_exit(&dtrace_lock); - dmutex_exit(&dtrace_meta_lock); + mutex_exit(&dtrace_lock); + mutex_exit(&dtrace_meta_lock); RETURN(EBUSY); } *pp = NULL; - dmutex_exit(&dtrace_lock); - dmutex_exit(&dtrace_meta_lock); + mutex_exit(&dtrace_lock); + mutex_exit(&dtrace_meta_lock); kmem_free(old->dtm_name, strlen(old->dtm_name) + 1); kmem_free(old, sizeof (dtrace_meta_t)); @@ -10902,16 +10750,7 @@ dtrace_ecb_destroy(dtrace_ecb_t *ecb) 
ASSERT(state->dts_ecbs[epid - 1] == ecb); state->dts_ecbs[epid - 1] = NULL; - /***********************************************/ - /* Mark us as the teardown leader, and keep */ - /* track of how many probes as we started. */ - /* avoid dtrace_sync calls when tearing */ - /* down a large number of probes. */ - /***********************************************/ - if (lock_teardown < 0) { - cnt_free1 = cnt_probes; - lock_teardown = smp_processor_id(); - } + cnt_free1 = cnt_probes; #if linux /***********************************************/ @@ -12055,9 +11894,9 @@ dtrace_enabling_provide(dtrace_provider_t *prv) enab = enab->dten_next) { for (i = 0; i < enab->dten_ndesc; i++) { desc = enab->dten_desc[i]->dted_probe; - dmutex_exit(&dtrace_lock); + mutex_exit(&dtrace_lock); prv->dtpv_pops.dtps_provide(parg, &desc); - dmutex_enter(&dtrace_lock); + mutex_enter(&dtrace_lock); /* * Process the retained enablings again if * they have changed while we weren't holding @@ -12069,9 +11908,9 @@ dtrace_enabling_provide(dtrace_provider_t *prv) } } while (all && (prv = prv->dtpv_next) != NULL); - dmutex_exit(&dtrace_lock); + mutex_exit(&dtrace_lock); dtrace_probe_provide(NULL, all ? NULL : prv); - dmutex_enter(&dtrace_lock); + mutex_enter(&dtrace_lock); } /* @@ -14231,11 +14070,6 @@ dtrace_printf("teardown start xcalls=%lu probes=%llu\n", cnt_xcall1, cnt_probes break; } - /***********************************************/ - /* Exit the 'critical' region for teardown. */ - /***********************************************/ - lock_teardown = -1; - /***********************************************/ /* Dump some stats on how long the teardown */ /* took. */ @@ -14669,22 +14503,22 @@ dtrace_helper_destroygen(int gen) help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs]; help->dthps_provs[help->dthps_nprovs] = NULL; - dmutex_exit(&dtrace_lock); + mutex_exit(&dtrace_lock); /* * If we have a meta provider, remove this helper provider. 
*/ - dmutex_enter(&dtrace_meta_lock); + mutex_enter(&dtrace_meta_lock); if (dtrace_meta_pid != NULL) { ASSERT(dtrace_deferred_pid == NULL); dtrace_helper_provider_remove(&prov->dthp_prov, p->p_pid); } - dmutex_exit(&dtrace_meta_lock); + mutex_exit(&dtrace_meta_lock); dtrace_helper_provider_destroy(prov); - dmutex_enter(&dtrace_lock); + mutex_enter(&dtrace_lock); } return (0); @@ -14788,8 +14622,8 @@ dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, { ASSERT(MUTEX_NOT_HELD(&dtrace_lock)); - dmutex_enter(&dtrace_meta_lock); - dmutex_enter(&dtrace_lock); + mutex_enter(&dtrace_meta_lock); + mutex_enter(&dtrace_lock); HERE(); //printk("dtrace_meta_pid=%p dtrace_provider=%p p_pid=%d\n", dtrace_meta_pid, dtrace_provider, p->p_pid); @@ -14813,7 +14647,7 @@ HERE(); dtrace_deferred_pid = help; } - dmutex_exit(&dtrace_lock); + mutex_exit(&dtrace_lock); } else if (dofhp != NULL) { /* @@ -14823,7 +14657,7 @@ HERE(); */ HERE(); - dmutex_exit(&dtrace_lock); + mutex_exit(&dtrace_lock); dtrace_helper_provide(dofhp, p->p_pid); @@ -14834,7 +14668,7 @@ HERE(); */ int i; - dmutex_exit(&dtrace_lock); + mutex_exit(&dtrace_lock); HERE(); for (i = 0; i < help->dthps_nprovs; i++) { @@ -14844,7 +14678,7 @@ HERE(); } } - dmutex_exit(&dtrace_meta_lock); + mutex_exit(&dtrace_meta_lock); HERE(); } @@ -14922,16 +14756,16 @@ HERE(); static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov) { - dmutex_enter(&dtrace_lock); + mutex_enter(&dtrace_lock); if (--hprov->dthp_ref == 0) { dof_hdr_t *dof; - dmutex_exit(&dtrace_lock); + mutex_exit(&dtrace_lock); dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; dtrace_dof_destroy(dof); kmem_free(hprov, sizeof (dtrace_helper_provider_t)); } else { - dmutex_exit(&dtrace_lock); + mutex_exit(&dtrace_lock); } } @@ -15258,10 +15092,10 @@ HERE(); dhp->dofhp_dof = (uint64_t)(uintptr_t)dof; HERE(); if (dtrace_helper_provider_add(dhp, gen) == 0) { - dmutex_exit(&dtrace_lock); + mutex_exit(&dtrace_lock); HERE(); dtrace_helper_provider_register(curproc, help, dhp); - dmutex_enter(&dtrace_lock); + mutex_enter(&dtrace_lock); destroy = 0; } @@ -15301,7 +15135,7 @@ dtrace_helpers_destroy(void) proc_t *p = curproc; int i; - dmutex_enter(&dtrace_lock); + mutex_enter(&dtrace_lock); ASSERT(p->p_dtrace_helpers != NULL); ASSERT(dtrace_helpers > 0); @@ -15328,13 +15162,13 @@ dtrace_helpers_destroy(void) } } - dmutex_exit(&dtrace_lock); + mutex_exit(&dtrace_lock); /* * Destroy the helper providers. 
*/ if (help->dthps_maxprovs > 0) { - dmutex_enter(&dtrace_meta_lock); + mutex_enter(&dtrace_meta_lock); if (dtrace_meta_pid != NULL) { ASSERT(dtrace_deferred_pid == NULL); @@ -15343,7 +15177,7 @@ dtrace_helpers_destroy(void) &help->dthps_provs[i]->dthp_prov, p->p_pid); } } else { - dmutex_enter(&dtrace_lock); + mutex_enter(&dtrace_lock); ASSERT(help->dthps_deferred == 0 || help->dthps_next != NULL || help->dthps_prev != NULL || @@ -15361,10 +15195,10 @@ dtrace_helpers_destroy(void) ASSERT(help->dthps_prev == NULL); } - dmutex_exit(&dtrace_lock); + mutex_exit(&dtrace_lock); } - dmutex_exit(&dtrace_meta_lock); + mutex_exit(&dtrace_meta_lock); for (i = 0; i < help->dthps_nprovs; i++) { dtrace_helper_provider_destroy(help->dthps_provs[i]); @@ -15374,7 +15208,7 @@ dtrace_helpers_destroy(void) sizeof (dtrace_helper_provider_t *)); } - dmutex_enter(&dtrace_lock); + mutex_enter(&dtrace_lock); dtrace_vstate_fini(&help->dthps_vstate); kmem_free(help->dthps_actions, @@ -15382,7 +15216,7 @@ dtrace_helpers_destroy(void) kmem_free(help, sizeof (dtrace_helpers_t)); --dtrace_helpers; - dmutex_exit(&dtrace_lock); + mutex_exit(&dtrace_lock); } static void @@ -15394,7 +15228,7 @@ dtrace_helpers_duplicate(proc_t *from, proc_t *to) dtrace_vstate_t *vstate; int i, j, sz, hasprovs = 0; - dmutex_enter(&dtrace_lock); + mutex_enter(&dtrace_lock); ASSERT(from->p_dtrace_helpers != NULL); ASSERT(dtrace_helpers > 0); @@ -15461,7 +15295,7 @@ dtrace_helpers_duplicate(proc_t *from, proc_t *to) hasprovs = 1; } - dmutex_exit(&dtrace_lock); + mutex_exit(&dtrace_lock); if (hasprovs) dtrace_helper_provider_register(to, newhelp, NULL); @@ -15530,8 +15364,8 @@ dtrace_module_loaded(struct notifier_block *nb, unsigned long val, void *data) } #endif - dmutex_enter(&dtrace_provider_lock); - dmutex_enter(&mod_lock); + mutex_enter(&dtrace_provider_lock); + mutex_enter(&mod_lock); # if linux ASSERT(ctl->state == MODULE_STATE_LIVE); @@ -15546,8 +15380,8 @@ dtrace_module_loaded(struct notifier_block *nb, unsigned long val, void *data) for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next) prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); - dmutex_exit(&mod_lock); - dmutex_exit(&dtrace_provider_lock); + mutex_exit(&mod_lock); + mutex_exit(&dtrace_provider_lock); /* * If we have any retained enablings, we need to match against them. @@ -15557,17 +15391,17 @@ dtrace_module_loaded(struct notifier_block *nb, unsigned long val, void *data) * classes.) So if we have any retained enablings, we need to dispatch * our task queue to do the match for us. */ - dmutex_enter(&dtrace_lock); + mutex_enter(&dtrace_lock); if (dtrace_retained == NULL) { - dmutex_exit(&dtrace_lock); + mutex_exit(&dtrace_lock); return NOTIFY_DONE; } (void) taskq_dispatch(dtrace_taskq, (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP); - dmutex_exit(&dtrace_lock); + mutex_exit(&dtrace_lock); /* * And now, for a little heuristic sleaze: in general, we want to @@ -15599,18 +15433,18 @@ dtrace_module_unloaded(struct modctl *modctl) template.dtpr_mod = ctl->mod_modname; # endif - dmutex_enter(&dtrace_provider_lock); - dmutex_enter(&mod_lock); - dmutex_enter(&dtrace_lock); + mutex_enter(&dtrace_provider_lock); + mutex_enter(&mod_lock); + mutex_enter(&dtrace_lock); if (dtrace_bymod == NULL) { /* * The DTrace module is loaded (obviously) but not attached; * we don't have any work to do. 
*/ - dmutex_exit(&dtrace_provider_lock); - dmutex_exit(&mod_lock); - dmutex_exit(&dtrace_lock); + mutex_exit(&dtrace_provider_lock); + mutex_exit(&mod_lock); + mutex_exit(&dtrace_lock); return; } @@ -15618,9 +15452,9 @@ printk("dtrace_module_unloaded called %p '%s'\n", modctl, ctl->name); for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); probe != NULL; probe = probe->dtpr_nextmod) { if (probe->dtpr_ecb != NULL) { - dmutex_exit(&dtrace_provider_lock); - dmutex_exit(&mod_lock); - dmutex_exit(&dtrace_lock); + mutex_exit(&dtrace_provider_lock); + mutex_exit(&mod_lock); + mutex_exit(&dtrace_lock); /* * This shouldn't _actually_ be possible -- we're @@ -15681,9 +15515,9 @@ printk("dtrace_module_unloaded called %p '%s'\n", modctl, ctl->name); kmem_free(probe, sizeof (dtrace_probe_t)); } - dmutex_exit(&dtrace_lock); - dmutex_exit(&mod_lock); - dmutex_exit(&dtrace_provider_lock); + mutex_exit(&dtrace_lock); + mutex_exit(&mod_lock); + mutex_exit(&dtrace_provider_lock); } void @@ -15702,7 +15536,7 @@ static int dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) { ASSERT(MUTEX_HELD(&cpu_lock)); - dmutex_enter(&dtrace_lock); + mutex_enter(&dtrace_lock); switch (what) { case CPU_CONFIG: { @@ -15755,7 +15589,7 @@ PRINT_CASE(CPU_UNCONFIG); break; } - dmutex_exit(&dtrace_lock); + mutex_exit(&dtrace_lock); return (0); } @@ -16181,7 +16015,7 @@ PRINT_CASE(DTRACEHIOC_ADD); if (dof == NULL) return (rval); - dmutex_enter(&dtrace_lock); + mutex_enter(&dtrace_lock); /* * dtrace_helper_slurp() takes responsibility for the dof -- @@ -16194,15 +16028,15 @@ PRINT_CASE(DTRACEHIOC_ADD); rval = EINVAL; } - dmutex_exit(&dtrace_lock); + mutex_exit(&dtrace_lock); return (rval); } case DTRACEHIOC_REMOVE: { PRINT_CASE(DTRACEHIOC_REMOVE); - dmutex_enter(&dtrace_lock); + mutex_enter(&dtrace_lock); rval = dtrace_helper_destroygen(arg); - dmutex_exit(&dtrace_lock); + mutex_exit(&dtrace_lock); return (rval); } @@ -16257,14 +16091,14 @@ PRINT_CASE(DTRACEIOC_PROVIDER); RETURN(EFAULT); pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; - dmutex_enter(&dtrace_provider_lock); + mutex_enter(&dtrace_provider_lock); for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) break; } - dmutex_exit(&dtrace_provider_lock); + mutex_exit(&dtrace_provider_lock); if (pvp == NULL) RETURN(ESRCH); diff --git a/driver/dtrace_asm.c b/driver/dtrace_asm.c index 645e482..adf8d0f 100644 --- a/driver/dtrace_asm.c +++ b/driver/dtrace_asm.c @@ -237,58 +237,17 @@ dtrace_fuword64_nocheck(void *addr) /**********************************************************************/ dtrace_icookie_t dtrace_interrupt_disable(void) -{ long ret; - -#if defined(__amd64) - __asm( - "pushfq\n" - "popq %%rax\n" - "cli\n" - : "=a" (ret) - ); - return ret; - -#elif defined(__i386) - __asm( - "pushf\n" - "pop %%eax\n" - "cli\n" - : "=a" (ret) - : - ); - return ret; - -# elif defined(__arm__) +{ + unsigned long ret; raw_local_irq_save(ret); return ret; -# endif } + dtrace_icookie_t dtrace_interrupt_get(void) -{ long ret; - -#if defined(__amd64) - __asm( - "pushfq\n" - "popq %%rax\n" - : "=a" (ret) - ); +{ unsigned long ret; + dtrace_interrupt_enable(ret = dtrace_interrupt_disable()); return ret; -#elif defined(__i386) - - __asm( - "pushf\n" - "pop %%eax\n" - : "=a" (ret) - : - ); - return ret; - -# elif defined(__arm__) - raw_local_save_flags(ret); - return ret;; - -# endif } /**********************************************************************/ /* This routine restores interrupts previously saved by */ @@ 
-299,35 +258,7 @@ dtrace_interrupt_get(void) void dtrace_interrupt_enable(dtrace_icookie_t flags) { - -#if defined(__amd64) - __asm( - "pushq %0\n" - "popfq\n" - : - : "m" (flags) - ); - -#elif defined(__i386) - -// /***********************************************/ -// /* We get kernel warnings because we break */ -// /* the rules if we do the equivalent to */ -// /* x86-64. This seems to work. */ -// /***********************************************/ -// raw_local_irq_enable(); -//// native_irq_enable(); -// return; - __asm( - "push %0\n" - "popf\n" - : - : "a" (flags) - ); - -# elif defined(__arm__) raw_local_irq_restore(flags); -#endif } /*ARGSUSED*/ diff --git a/driver/dtrace_isa.c b/driver/dtrace_isa.c index 0a5dc13..2b65b4b 100644 --- a/driver/dtrace_isa.c +++ b/driver/dtrace_isa.c @@ -75,7 +75,7 @@ extern size_t _allsyscalls_size; /* We use the kernels stack dumper to avoid issues with cpu */ /* architecture and frame pointer. */ /**********************************************************************/ -MUTEX_DEFINE(dtrace_stack_mutex); +static DEFINE_SPINLOCK(dtrace_stack_lock); # if defined(HAVE_STACKTRACE_OPS) static pc_t *g_pcstack; static int g_pcstack_limit; @@ -257,7 +257,7 @@ dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes, /* in the callback, so lets avoid relying */ /* on the kernel stack walker. */ /***********************************************/ - dmutex_enter(&dtrace_stack_mutex); + spin_lock(&dtrace_stack_lock); g_depth = 0; g_pcstack = pcstack; g_pcstack_limit = pcstack_limit; @@ -268,7 +268,7 @@ dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes, dump_trace(NULL, NULL, NULL, &print_trace_ops, NULL); #endif depth = g_depth; - dmutex_exit(&dtrace_stack_mutex); + spin_unlock(&dtrace_stack_lock); #endif while (depth < pcstack_limit) diff --git a/driver/dtrace_linux.c b/driver/dtrace_linux.c index 36dc970..6d828b9 100644 --- a/driver/dtrace_linux.c +++ b/driver/dtrace_linux.c @@ -67,7 +67,6 @@ module_param(arg_kallsyms_lookup_name, charp, 0); extern char dtrace_buf[]; extern const int log_bufsiz; extern int dbuf_i; -extern int dtrace_safe; /**********************************************************************/ /* TRUE when we have called dtrace_linux_init(). 
After that point, */ @@ -146,7 +145,7 @@ cpu_core_t *cpu_core; cpu_t *cpu_table; cred_t *cpu_cred; int nr_cpus = 1; -MUTEX_DEFINE(mod_lock); +DEFINE_MUTEX(mod_lock); /**********************************************************************/ /* Set to true by debug code that wants to immediately disable */ @@ -164,7 +163,7 @@ int dtrace_shutdown; /**********************************************************************/ sol_proc_t *shadow_procs; -MUTEX_DEFINE(cpu_lock); +DEFINE_MUTEX(cpu_lock); int panic_quiesce; sol_proc_t *curthread; @@ -283,8 +282,6 @@ void signal_fini(void); int systrace_init(void); void systrace_exit(void); void io_prov_init(void); -void xcall_init(void); -void xcall_fini(void); //static void print_pte(pte_t *pte, int level); /**********************************************************************/ @@ -315,7 +312,11 @@ dtrace_clflush(void *ptr) /**********************************************************************/ cred_t * CRED() -{ cred_t *cr = &cpu_cred[cpu_get_id()]; +{ + cred_t *cr; + /* FIXME: This is a hack */ +#if 0 + cr = &cpu_cred[cpu_get_id()]; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) cr->cr_uid = current->cred->uid; @@ -325,6 +326,9 @@ CRED() cr->cr_gid = current->gid; #endif //printk("get cred end %d %d\n", cr->cr_uid, cr->cr_gid); +#else + cr = &cpu_cred[0]; +#endif return cr; } @@ -410,7 +414,14 @@ dtrace_gethrtime() /* to tsc and return nsec. */ /***********************************************/ if (native_sched_clock_ptr) { - return (*native_sched_clock_ptr)(); + hrtime_t r; + + /* XXX: This is a hack */ + preempt_disable(); + r = (*native_sched_clock_ptr)(); + preempt_enable(); + + return r; } /***********************************************/ /* Later kernels use this to allow access */ @@ -764,10 +775,12 @@ dtrace_linux_init(void) # define _PAGE_NX 0 # define _PAGE_RW 0 # endif + preempt_disable(); rdtscll(t); (void) dtrace_gethrtime(); rdtscll(t1); tsc_max_delta = t1 - t; + preempt_enable(); /***********************************************/ /* Let us grab the panics if we are in */ @@ -886,17 +899,6 @@ dtrace_mach_aframes(void) return 1; } -/**********************************************************************/ -/* Make this a function, since on earlier kernels, */ -/* mutex_is_locked() is an inline complex function which cannot be */ -/* used in an expression context (ASSERT(MUTEX_HELD()) in */ -/* dtrace.c) */ -/**********************************************************************/ -int -dtrace_mutex_is_locked(mutex_t *mp) -{ - return dmutex_is_locked(mp); -} /**********************************************************************/ /* Avoid calling real memcpy, since we will call this from */ /* interrupt context. */ @@ -1815,7 +1817,7 @@ return 0; /* shadow_procs for this purpose now. */ /**********************************************************************/ static struct par_alloc_t *hd_par; -static mutex_t par_mutex; +static DEFINE_MUTEX(par_mutex); void * par_alloc(int domain, void *ptr, int size, int *init) @@ -1831,16 +1833,16 @@ Need to FIX! 
return NULL; #endif - dmutex_enter(&par_mutex); + mutex_enter(&par_mutex); for (p = hd_par; p; p = p->pa_next) { if (p->pa_ptr == ptr && p->pa_domain == domain) { if (init) *init = FALSE; - dmutex_exit(&par_mutex); + mutex_exit(&par_mutex); return p; } } - dmutex_exit(&par_mutex); + mutex_exit(&par_mutex); if (init) *init = TRUE; @@ -1850,10 +1852,10 @@ return NULL; dtrace_bzero(p+1, size); p->pa_domain = domain; p->pa_ptr = ptr; - dmutex_enter(&par_mutex); + mutex_enter(&par_mutex); p->pa_next = hd_par; hd_par = p; - dmutex_exit(&par_mutex); + mutex_exit(&par_mutex); return p; } @@ -1881,10 +1883,10 @@ par_free(int domain, void *ptr) #if 0 return; #endif - dmutex_enter(&par_mutex); + mutex_enter(&par_mutex); if (hd_par == p && hd_par->pa_domain == domain) { hd_par = hd_par->pa_next; - dmutex_exit(&par_mutex); + mutex_exit(&par_mutex); kfree(ptr); return; } @@ -1892,13 +1894,13 @@ return; // printk("p1=%p\n", p1); } if (p1 == NULL) { - dmutex_exit(&par_mutex); + mutex_exit(&par_mutex); printk("where did p1 go?\n"); return; } if (p1->pa_next == p && p1->pa_domain == domain) p1->pa_next = p->pa_next; - dmutex_exit(&par_mutex); + mutex_exit(&par_mutex); kfree(ptr); } /**********************************************************************/ @@ -1909,14 +1911,14 @@ static void * par_lookup(void *ptr) { par_alloc_t *p; - dmutex_enter(&par_mutex); + mutex_enter(&par_mutex); for (p = hd_par; p; p = p->pa_next) { if (p->pa_ptr == ptr) { - dmutex_exit(&par_mutex); + mutex_exit(&par_mutex); return p; } } - dmutex_exit(&par_mutex); + mutex_exit(&par_mutex); return NULL; } /**********************************************************************/ @@ -2031,12 +2033,12 @@ proc_exit_notifier(struct notifier_block *n, unsigned long code, void *ptr) sol_proc.p_pid = current->pid; curthread = &sol_proc; - dmutex_init(&sol_proc.p_lock); - dmutex_enter(&sol_proc.p_lock); + mutex_init(&sol_proc.p_lock); + mutex_enter(&sol_proc.p_lock); dtrace_fasttrap_exit_ptr(&sol_proc); - dmutex_exit(&sol_proc.p_lock); + mutex_exit(&sol_proc.p_lock); return 0; } @@ -2263,7 +2265,6 @@ syms_write(struct file *file, const char __user *buf, /***********************************************/ dtrace_linux_init(); - xcall_init(); dtrace_profile_init(); dtrace_prcom_init(); dcpc_init(); @@ -2381,12 +2382,11 @@ vmem_create(const char *name, void *base, size_t size, size_t quantum, if (TRACE_ALLOC || dtrace_here) dtrace_printf("vmem_create(size=%d)\n", (int) size); - dmutex_init(&seqp->seq_mutex); + mutex_init(&seqp->seq_mutex); seqp->seq_id = 0; seqp->seq_magic = SEQ_MAGIC; dtrace_printf("vmem_create(%s) %p\n", name, seqp); -/* mutex_dump(&seqp->seq_mutex);*/ return seqp; } @@ -2689,8 +2689,6 @@ dtracedrv_write(struct file *file, const char __user *buf, len = bpend - cp; if (len >= 6 && strncmp(cp, "here=", 5) == 0) { dtrace_here = simple_strtoul(cp + 5, NULL, 0); - } else if (len >= 6 && strncmp(cp, "dtrace_safe=", 5) == 0) { - dtrace_safe = simple_strtoul(cp + 5, NULL, 0); } else if (di_cnt < MAX_SEC_LIST) { int ret = parse_sec(&di_list[di_cnt], cp, bpend); if (ret < 0) @@ -2790,10 +2788,7 @@ static int proc_dtrace_stats_show(struct seq_file *seq, void *v) extern unsigned long long cnt_int3_2; extern unsigned long long cnt_int3_3; extern unsigned long cnt_ipi1; - extern unsigned long long cnt_probe_recursion; extern unsigned long cnt_probes; - extern unsigned long long cnt_probe_noint; - extern unsigned long long cnt_probe_safe; extern unsigned long cnt_mtx1; extern unsigned long cnt_mtx2; extern unsigned long cnt_mtx3; @@ -2811,11 
+2806,7 @@ static int proc_dtrace_stats_show(struct seq_file *seq, void *v) unsigned long *ptr; char *name; } stats[] = { - {TYPE_INT, (unsigned long *) &dtrace_safe, "dtrace_safe"}, {TYPE_LONG_LONG, &cnt_probes, "probes"}, - {TYPE_LONG, (unsigned long *) &cnt_probe_recursion, "probe_recursion"}, - LONG_LONG(cnt_probe_noint, "probe_noint"), - LONG_LONG(cnt_probe_safe, "probe_safe"), LONG_LONG(cnt_int1_1, "int1"), LONG_LONG(cnt_int3_1, "int3_1"), LONG_LONG(cnt_int3_2, "int3_2(ours)"), @@ -3100,11 +3091,11 @@ static struct proc_dir_entry *dir; /* to handle actual online cpus. */ /***********************************************/ cpu_list[i].cpu_next_onln = &cpu_list[i+1]; - dmutex_init(&cpu_list[i].cpu_ft_lock.k_mutex); + mutex_init(&cpu_list[i].cpu_ft_lock.k_mutex); } cpu_list[nr_cpus-1].cpu_next = cpu_list; for (i = 0; i < nr_cpus; i++) { - dmutex_init(&cpu_core[i].cpuc_pid_lock); + mutex_init(&cpu_core[i].cpuc_pid_lock); } /***********************************************/ /* Initialise the shadow procs. We dont */ @@ -3114,8 +3105,8 @@ static struct proc_dir_entry *dir; shadow_procs = (sol_proc_t *) vmalloc(sizeof(sol_proc_t) * PID_MAX_DEFAULT); memset(shadow_procs, 0, sizeof(sol_proc_t) * PID_MAX_DEFAULT); for (i = 0; i < PID_MAX_DEFAULT; i++) { - dmutex_init(&shadow_procs[i].p_lock); - dmutex_init(&shadow_procs[i].p_crlock); + mutex_init(&shadow_procs[i].p_lock); + mutex_init(&shadow_procs[i].p_crlock); } /***********************************************/ @@ -3212,8 +3203,6 @@ static void __exit dtracedrv_exit(void) remove_proc_entry("dtrace", 0); misc_deregister(&helper_dev); misc_deregister(&dtracedrv_dev); - - xcall_fini(); } module_init(dtracedrv_init); module_exit(dtracedrv_exit); diff --git a/driver/dtrace_linux.h b/driver/dtrace_linux.h index 226ce69..77ad26e 100644 --- a/driver/dtrace_linux.h +++ b/driver/dtrace_linux.h @@ -63,7 +63,7 @@ /**********************************************************************/ #define PATCHVAL 0xcc /* INT3 instruction */ -# define MUTEX_HELD dtrace_mutex_is_locked +# define MUTEX_HELD mutex_is_locked #define PRIV_EFFECTIVE (1 << 0) #define PRIV_DTRACE_KERNEL (1 << 1) @@ -422,7 +422,6 @@ int sulword(const void *addr, ulong_t value); int instr_in_text_seg(struct module *mp, char *name, Elf_Sym *sym); cpu_core_t *cpu_get_this(void); int is_kernel_text(unsigned long); -int dtrace_mutex_is_locked(mutex_t *); asmlinkage int dtrace_memcpy_with_error(void *, void *, size_t); void set_console_on(int flag); void dtrace_linux_panic(const char *, ...); diff --git a/driver/dtrace_proto.h b/driver/dtrace_proto.h index 1dc653d..6780488 100644 --- a/driver/dtrace_proto.h +++ b/driver/dtrace_proto.h @@ -67,7 +67,6 @@ void dtrace_parse_kernel(int, void (*callback)(uint8_t *, int), uint8_t *); int is_probable_instruction(instr_t *, int is_entry); void dtrace_instr_dump(char *label, uint8_t *insn); dtrace_icookie_t dtrace_interrupt_get(void); -void xcall_slave2(void); char * hrtime_str(hrtime_t s); int dtrace_xen_hypercall(int call, void *a, void *b, void *c); int dtrace_is_xen(void); diff --git a/driver/fasttrap.c b/driver/fasttrap.c index 52d3fdf..6004d08 100644 --- a/driver/fasttrap.c +++ b/driver/fasttrap.c @@ -59,18 +59,18 @@ sprlock(int pid) //printk("sprlock: pid=%d\n", pid); if (!p) return NULL; - dmutex_enter(&p->p_lock); + mutex_enter(&p->p_lock); return p; } void sprlock_proc(proc_t *p) { - dmutex_enter(&p->p_lock); + mutex_enter(&p->p_lock); } void sprunlock(proc_t *p) { - dmutex_exit(&p->p_lock); + mutex_exit(&p->p_lock); } # define RW_WRITER 2 #define 
SEXITING 0x00000002 /* process is exiting */ @@ -93,7 +93,7 @@ static void swap_func(void *p1, void *p2, int size) } # define qsort(base, num, size, cmp) sort(base, num, size, cmp, swap_func) -MUTEX_DEFINE(pidlock); +DEFINE_MUTEX(pidlock); void (*dtrace_fasttrap_fork_ptr)(proc_t *, proc_t *); void (*dtrace_fasttrap_exec_ptr)(proc_t *); @@ -220,7 +220,7 @@ static dev_info_t *fasttrap_devi; static dtrace_meta_provider_id_t fasttrap_meta_id; static timeout_id_t fasttrap_timeout; -static MUTEX_DEFINE(fasttrap_cleanup_mtx); +static DEFINE_MUTEX(fasttrap_cleanup_mtx); static uint_t fasttrap_cleanup_work; /* @@ -256,7 +256,7 @@ static fasttrap_hash_t fasttrap_provs; static fasttrap_hash_t fasttrap_procs; static uint64_t fasttrap_pid_count; /* pid ref count */ -static MUTEX_DEFINE(fasttrap_count_mtx); /* lock on ref count */ +static DEFINE_MUTEX(fasttrap_count_mtx); /* lock on ref count */ #define FASTTRAP_ENABLE_FAIL 1 #define FASTTRAP_ENABLE_PARTIAL 2 @@ -333,9 +333,9 @@ HERE(); sqp->sq_info.si_code = TRAP_DTRACE; sqp->sq_info.si_addr = (caddr_t)pc; - dmutex_enter(&p->p_lock); + mutex_enter(&p->p_lock); sigaddqa(p, t, sqp); - dmutex_exit(&p->p_lock); + mutex_exit(&p->p_lock); if (t != NULL) aston(t); @@ -373,8 +373,8 @@ HERE(); /* kernel. */ /***********************************************/ for (i = 0; i < num_online_cpus(); i++) { - dmutex_enter(&cpu_core[i].cpuc_pid_lock); - dmutex_exit(&cpu_core[i].cpuc_pid_lock); + mutex_enter(&cpu_core[i].cpuc_pid_lock); + mutex_exit(&cpu_core[i].cpuc_pid_lock); HERE(); } } @@ -397,10 +397,10 @@ fasttrap_pid_cleanup_cb(void *data) in = 1; HERE(); - dmutex_enter(&fasttrap_cleanup_mtx); + mutex_enter(&fasttrap_cleanup_mtx); while (fasttrap_cleanup_work) { fasttrap_cleanup_work = 0; - dmutex_exit(&fasttrap_cleanup_mtx); + mutex_exit(&fasttrap_cleanup_mtx); later = 0; @@ -412,7 +412,7 @@ HERE(); */ for (i = 0; i < fasttrap_provs.fth_nent; i++) { bucket = &fasttrap_provs.fth_table[i]; - dmutex_enter(&bucket->ftb_mtx); + mutex_enter(&bucket->ftb_mtx); fpp = (fasttrap_provider_t **)&bucket->ftb_data; while ((fp = *fpp) != NULL) { @@ -421,7 +421,7 @@ HERE(); continue; } - dmutex_enter(&fp->ftp_mtx); + mutex_enter(&fp->ftp_mtx); /* * If this provider has consumers actively @@ -431,7 +431,7 @@ HERE(); */ if (fp->ftp_ccount != 0 || fp->ftp_mcount != 0) { - dmutex_exit(&fp->ftp_mtx); + mutex_exit(&fp->ftp_mtx); fp->ftp_marked = 0; continue; } @@ -439,7 +439,7 @@ HERE(); if (!fp->ftp_retired || fp->ftp_rcount != 0) fp->ftp_marked = 0; - dmutex_exit(&fp->ftp_mtx); + mutex_exit(&fp->ftp_mtx); /* * If we successfully unregister this @@ -463,10 +463,10 @@ HERE(); fasttrap_provider_free(fp); } } - dmutex_exit(&bucket->ftb_mtx); + mutex_exit(&bucket->ftb_mtx); } - dmutex_enter(&fasttrap_cleanup_mtx); + mutex_enter(&fasttrap_cleanup_mtx); } ASSERT(fasttrap_timeout != 0); @@ -488,7 +488,7 @@ HERE(); else fasttrap_timeout = 0; - dmutex_exit(&fasttrap_cleanup_mtx); + mutex_exit(&fasttrap_cleanup_mtx); in = 0; } @@ -499,11 +499,11 @@ static void fasttrap_pid_cleanup(void) { HERE(); - dmutex_enter(&fasttrap_cleanup_mtx); + mutex_enter(&fasttrap_cleanup_mtx); fasttrap_cleanup_work = 1; if (fasttrap_timeout == 0) fasttrap_timeout = timeout(&fasttrap_pid_cleanup_cb, NULL, 1); - dmutex_exit(&fasttrap_cleanup_mtx); + mutex_exit(&fasttrap_cleanup_mtx); } /* @@ -538,9 +538,9 @@ printk("in fasttrap_fork\n"); * We don't have to worry about the child process disappearing * because we're in fork(). 
*/ - dmutex_enter(&cp->p_lock); + mutex_enter(&cp->p_lock); sprlock_proc(cp); - dmutex_exit(&cp->p_lock); + mutex_exit(&cp->p_lock); /* * Iterate over every tracepoint looking for ones that belong to the @@ -550,7 +550,7 @@ printk("in fasttrap_fork\n"); fasttrap_tracepoint_t *tp; fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i]; - dmutex_enter(&bucket->ftb_mtx); + mutex_enter(&bucket->ftb_mtx); for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) { if (tp->ftt_pid == ppid && tp->ftt_proc->ftpc_acount != 0) { @@ -568,10 +568,10 @@ printk("in fasttrap_fork\n"); ASSERT(tp->ftt_proc->ftpc_acount != 0); } } - dmutex_exit(&bucket->ftb_mtx); + mutex_exit(&bucket->ftb_mtx); } - dmutex_enter(&cp->p_lock); + mutex_enter(&cp->p_lock); sprunlock(cp); } @@ -586,7 +586,7 @@ fasttrap_exec_exit(proc_t *p) ASSERT(p == curproc); ASSERT(MUTEX_HELD(&p->p_lock)); - dmutex_exit(&p->p_lock); + mutex_exit(&p->p_lock); //printk("fasttrap_exec_exit: pid=%d\n", p->p_pid); /* @@ -595,7 +595,7 @@ fasttrap_exec_exit(proc_t *p) */ fasttrap_provider_retire(p->p_pid, FASTTRAP_PID_NAME, 0); - dmutex_enter(&p->p_lock); + mutex_enter(&p->p_lock); } @@ -650,7 +650,7 @@ HERE(); * defunct. */ again: - dmutex_enter(&bucket->ftb_mtx); + mutex_enter(&bucket->ftb_mtx); for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) { /* * Note that it's safe to access the active count on the @@ -702,7 +702,7 @@ HERE(); } HERE(); - dmutex_exit(&bucket->ftb_mtx); + mutex_exit(&bucket->ftb_mtx); HERE(); if (new_tp != NULL) { @@ -725,7 +725,7 @@ HERE(); membar_producer(); bucket->ftb_data = new_tp; membar_producer(); - dmutex_exit(&bucket->ftb_mtx); + mutex_exit(&bucket->ftb_mtx); /* * Activate the tracepoint in the ISA-specific manner. @@ -751,7 +751,7 @@ HERE(); } HERE(); - dmutex_exit(&bucket->ftb_mtx); + mutex_exit(&bucket->ftb_mtx); /* * Initialize the tracepoint that's been preallocated with the probe. @@ -821,7 +821,7 @@ fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index) * ones registered with it. */ bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)]; - dmutex_enter(&bucket->ftb_mtx); + mutex_enter(&bucket->ftb_mtx); for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) { if (tp->ftt_pid == pid && tp->ftt_pc == pc && tp->ftt_proc == provider->ftp_proc) @@ -899,7 +899,7 @@ fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index) *tmp_tp = tp; } - dmutex_exit(&bucket->ftb_mtx); + mutex_exit(&bucket->ftb_mtx); /* * Tag the modified probe with the generation in which it was @@ -909,7 +909,7 @@ fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index) return; } - dmutex_exit(&bucket->ftb_mtx); + mutex_exit(&bucket->ftb_mtx); /* * We can't safely remove the tracepoint from the set of active @@ -945,7 +945,7 @@ fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index) /* * Remove the probe from the hash table of active tracepoints. */ - dmutex_enter(&bucket->ftb_mtx); + mutex_enter(&bucket->ftb_mtx); pp = (fasttrap_tracepoint_t **)&bucket->ftb_data; ASSERT(*pp != NULL); while (*pp != tp) { @@ -956,7 +956,7 @@ fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index) *pp = tp->ftt_next; membar_producer(); - dmutex_exit(&bucket->ftb_mtx); + mutex_exit(&bucket->ftb_mtx); /* * Tag the modified probe with the generation in which it was changed. 
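
The fasttrap hunks on either side repeatedly convert the same two-lock handoff: take the global pidlock, find the process, pin it with its own p_lock, then drop pidlock. A hedged sketch of that pattern as a helper, mirroring the shape of fasttrap_provider_lookup() below (pin_proc is an illustrative name; proc_t, pidlock, prfind() and p_lock come from this driver's Solaris compatibility layer):

    static proc_t *
    pin_proc(pid_t pid)
    {
            proc_t *p;

            mutex_enter(&pidlock);            /* guards the pid lookup         */
            if ((p = prfind(pid)) == NULL) {
                    mutex_exit(&pidlock);
                    return (NULL);            /* no such process               */
            }
            mutex_enter(&p->p_lock);          /* pin the proc first ...        */
            mutex_exit(&pidlock);             /* ... then release the global   */
            return (p);                       /* caller drops p->p_lock        */
    }

Taking p_lock before dropping pidlock is what keeps the process from disappearing between the lookup and its use; reversing the order would reintroduce the race these hunks are careful to avoid.
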
@@ -975,7 +975,7 @@ fasttrap_enable_callbacks(void) */ HERE(); //printk("fasttrap_pid_count=%ld\n", fasttrap_pid_count); - dmutex_enter(&fasttrap_count_mtx); + mutex_enter(&fasttrap_count_mtx); if (fasttrap_pid_count == 0) { ASSERT(dtrace_pid_probe_ptr == NULL); ASSERT(dtrace_return_probe_ptr == NULL); @@ -985,7 +985,7 @@ HERE(); ASSERT(dtrace_pid_probe_ptr == &fasttrap_pid_probe); ASSERT(dtrace_return_probe_ptr == &fasttrap_return_probe); fasttrap_pid_count++; - dmutex_exit(&fasttrap_count_mtx); + mutex_exit(&fasttrap_count_mtx); } static void @@ -994,7 +994,7 @@ fasttrap_disable_callbacks(void) ASSERT(MUTEX_HELD(&cpu_lock)); HERE(); - dmutex_enter(&fasttrap_count_mtx); + mutex_enter(&fasttrap_count_mtx); ASSERT(fasttrap_pid_count > 0); fasttrap_pid_count--; if (fasttrap_pid_count == 0) { @@ -1027,7 +1027,7 @@ HERE(); # endif } HERE(); - dmutex_exit(&fasttrap_count_mtx); + mutex_exit(&fasttrap_count_mtx); HERE(); } @@ -1050,9 +1050,9 @@ fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg) * must increment this even if we aren't able to properly enable * this probe. */ - dmutex_enter(&probe->ftp_prov->ftp_mtx); + mutex_enter(&probe->ftp_prov->ftp_mtx); probe->ftp_prov->ftp_rcount++; - dmutex_exit(&probe->ftp_prov->ftp_mtx); + mutex_exit(&probe->ftp_prov->ftp_mtx); /* * If this probe's provider is retired (meaning it was valid in a @@ -1074,7 +1074,7 @@ HERE(); return 0; HERE(); - dmutex_enter(&pidlock); + mutex_enter(&pidlock); p = prfind(probe->ftp_pid); /* @@ -1085,14 +1085,14 @@ HERE(); ASSERT(p->p_parent == curproc); ASSERT(p->p_stat == SIDL); - dmutex_enter(&p->p_lock); - dmutex_exit(&pidlock); + mutex_enter(&p->p_lock); + mutex_exit(&pidlock); sprlock_proc(p); } ASSERT(!(p->p_flag & SVFORK)); - dmutex_exit(&p->p_lock); + mutex_exit(&p->p_lock); HERE(); //printk("pid=%d p=%p\n", probe->ftp_pid, p); @@ -1132,7 +1132,7 @@ HERE(); i--; } - dmutex_enter(&p->p_lock); + mutex_enter(&p->p_lock); sprunlock(p); /* @@ -1144,7 +1144,7 @@ HERE(); } } - dmutex_enter(&p->p_lock); + mutex_enter(&p->p_lock); sprunlock(p); probe->ftp_enabled = 1; @@ -1172,11 +1172,11 @@ HERE(); HERE(); if ((p = sprlock(probe->ftp_pid)) != NULL) { ASSERT(!(p->p_flag & SVFORK)); - dmutex_exit(&p->p_lock); + mutex_exit(&p->p_lock); } HERE(); - dmutex_enter(&provider->ftp_mtx); + mutex_enter(&provider->ftp_mtx); HERE(); /* @@ -1200,9 +1200,9 @@ HERE(); */ if (provider->ftp_retired && !provider->ftp_marked) whack = provider->ftp_marked = 1; - dmutex_exit(&provider->ftp_mtx); + mutex_exit(&provider->ftp_mtx); - dmutex_enter(&p->p_lock); + mutex_enter(&p->p_lock); sprunlock(p); } else { /* @@ -1211,7 +1211,7 @@ HERE(); */ if (provider->ftp_rcount == 0 && !provider->ftp_marked) whack = provider->ftp_marked = 1; - dmutex_exit(&provider->ftp_mtx); + mutex_exit(&provider->ftp_mtx); } if (whack) @@ -1335,17 +1335,17 @@ fasttrap_proc_lookup(pid_t pid) bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)]; HERE(); - dmutex_enter(&bucket->ftb_mtx); + mutex_enter(&bucket->ftb_mtx); HERE(); for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) { if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) { - dmutex_enter(&fprc->ftpc_mtx); - dmutex_exit(&bucket->ftb_mtx); + mutex_enter(&fprc->ftpc_mtx); + mutex_exit(&bucket->ftb_mtx); fprc->ftpc_rcount++; atomic_add_64(&fprc->ftpc_acount, 1); ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount); - dmutex_exit(&fprc->ftpc_mtx); + mutex_exit(&fprc->ftpc_mtx); HERE(); return (fprc); @@ -1357,15 +1357,15 @@ HERE(); * Drop the bucket lock so we don't try to perform a sleeping 
* allocation under it. */ - dmutex_exit(&bucket->ftb_mtx); + mutex_exit(&bucket->ftb_mtx); new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP); new_fprc->ftpc_pid = pid; new_fprc->ftpc_rcount = 1; new_fprc->ftpc_acount = 1; - dmutex_init(&new_fprc->ftpc_mtx); + mutex_init(&new_fprc->ftpc_mtx); - dmutex_enter(&bucket->ftb_mtx); + mutex_enter(&bucket->ftb_mtx); /* * Take another lap through the list to make sure a proc hasn't @@ -1373,12 +1373,12 @@ HERE(); */ for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) { if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) { - dmutex_enter(&fprc->ftpc_mtx); - dmutex_exit(&bucket->ftb_mtx); + mutex_enter(&fprc->ftpc_mtx); + mutex_exit(&bucket->ftb_mtx); fprc->ftpc_rcount++; atomic_add_64(&fprc->ftpc_acount, 1); ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount); - dmutex_exit(&fprc->ftpc_mtx); + mutex_exit(&fprc->ftpc_mtx); kmem_free(new_fprc, sizeof (fasttrap_proc_t)); HERE(); @@ -1391,7 +1391,7 @@ HERE(); new_fprc->ftpc_next = bucket->ftb_data; bucket->ftb_data = new_fprc; - dmutex_exit(&bucket->ftb_mtx); + mutex_exit(&bucket->ftb_mtx); return (new_fprc); } @@ -1404,19 +1404,19 @@ fasttrap_proc_release(fasttrap_proc_t *proc) pid_t pid = proc->ftpc_pid; HERE(); - dmutex_enter(&proc->ftpc_mtx); + mutex_enter(&proc->ftpc_mtx); ASSERT(proc->ftpc_rcount != 0); ASSERT(proc->ftpc_acount <= proc->ftpc_rcount); if (--proc->ftpc_rcount != 0) { HERE(); - dmutex_exit(&proc->ftpc_mtx); + mutex_exit(&proc->ftpc_mtx); return; } HERE(); - dmutex_exit(&proc->ftpc_mtx); + mutex_exit(&proc->ftpc_mtx); /* * There should definitely be no live providers associated with this @@ -1425,7 +1425,7 @@ HERE(); ASSERT(proc->ftpc_acount == 0); bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)]; - dmutex_enter(&bucket->ftb_mtx); + mutex_enter(&bucket->ftb_mtx); fprcp = (fasttrap_proc_t **)&bucket->ftb_data; while ((fprc = *fprcp) != NULL) { @@ -1445,7 +1445,7 @@ HERE(); *fprcp = fprc->ftpc_next; - dmutex_exit(&bucket->ftb_mtx); + mutex_exit(&bucket->ftb_mtx); HERE(); kmem_free(fprc, sizeof (fasttrap_proc_t)); @@ -1472,7 +1472,7 @@ fasttrap_provider_lookup(pid_t pid, const char *name, ASSERT(pattr != NULL); bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)]; - dmutex_enter(&bucket->ftb_mtx); + mutex_enter(&bucket->ftb_mtx); /* * Take a lap through the list and return the match if we find it. @@ -1480,8 +1480,8 @@ fasttrap_provider_lookup(pid_t pid, const char *name, for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) { if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 && !fp->ftp_retired) { - dmutex_enter(&fp->ftp_mtx); - dmutex_exit(&bucket->ftb_mtx); + mutex_enter(&fp->ftp_mtx); + mutex_exit(&bucket->ftb_mtx); return (fp); } } @@ -1490,24 +1490,24 @@ fasttrap_provider_lookup(pid_t pid, const char *name, * Drop the bucket lock so we don't try to perform a sleeping * allocation under it. */ - dmutex_exit(&bucket->ftb_mtx); + mutex_exit(&bucket->ftb_mtx); /* * Make sure the process exists, isn't a child created as the result * of a vfork(2), and isn't a zombie (but may be in fork). 
*/ - dmutex_enter(&pidlock); + mutex_enter(&pidlock); if ((p = prfind(pid)) == NULL) { HERE(); - dmutex_exit(&pidlock); + mutex_exit(&pidlock); return (NULL); } HERE(); - dmutex_enter(&p->p_lock); - dmutex_exit(&pidlock); + mutex_enter(&p->p_lock); + mutex_exit(&pidlock); if (p->p_flag & (SVFORK | SEXITING)) { HERE(); - dmutex_exit(&p->p_lock); + mutex_exit(&p->p_lock); return (NULL); } HERE(); @@ -1523,21 +1523,21 @@ HERE(); * Grab the credentials for this process so we have * something to pass to dtrace_register(). */ - dmutex_enter(&p->p_crlock); + mutex_enter(&p->p_crlock); crhold(p->p_cred); cred = p->p_cred; - dmutex_exit(&p->p_crlock); - dmutex_exit(&p->p_lock); + mutex_exit(&p->p_crlock); + mutex_exit(&p->p_lock); new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP); new_fp->ftp_pid = pid; new_fp->ftp_proc = fasttrap_proc_lookup(pid); - dmutex_init(&new_fp->ftp_mtx); - dmutex_init(&new_fp->ftp_cmtx); + mutex_init(&new_fp->ftp_mtx); + mutex_init(&new_fp->ftp_cmtx); ASSERT(new_fp->ftp_proc != NULL); - dmutex_enter(&bucket->ftb_mtx); + mutex_enter(&bucket->ftb_mtx); /* * Take another lap through the list to make sure a provider hasn't @@ -1546,8 +1546,8 @@ HERE(); for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) { if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 && !fp->ftp_retired) { - dmutex_enter(&fp->ftp_mtx); - dmutex_exit(&bucket->ftb_mtx); + mutex_enter(&fp->ftp_mtx); + mutex_exit(&bucket->ftb_mtx); fasttrap_provider_free(new_fp); crfree(cred); return (fp); @@ -1570,7 +1570,7 @@ HERE(); DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER | DTRACE_PRIV_ZONEOWNER, cred, pattr == &pid_attr ? &pid_pops : &usdt_pops, new_fp, &new_fp->ftp_provid) != 0) { - dmutex_exit(&bucket->ftb_mtx); + mutex_exit(&bucket->ftb_mtx); fasttrap_provider_free(new_fp); crfree(cred); return (NULL); @@ -1580,8 +1580,8 @@ HERE(); bucket->ftb_data = new_fp; //printk("%s(%d):new USDT provider: '%s'\n", __FILE__, __LINE__, provname); - dmutex_enter(&new_fp->ftp_mtx); - dmutex_exit(&bucket->ftb_mtx); + mutex_enter(&new_fp->ftp_mtx); + mutex_exit(&bucket->ftb_mtx); crfree(cred); return (new_fp); @@ -1624,19 +1624,19 @@ HERE(); * corresponds to this process's hash chain in the provider hash * table. Don't sweat it if we can't find the process. 
*/ - dmutex_enter(&pidlock); + mutex_enter(&pidlock); if ((p = prfind(pid)) == NULL) { HERE(); - dmutex_exit(&pidlock); + mutex_exit(&pidlock); return; } HERE(); - dmutex_enter(&p->p_lock); - dmutex_exit(&pidlock); + mutex_enter(&p->p_lock); + mutex_exit(&pidlock); p->p_dtrace_probes--; - dmutex_exit(&p->p_lock); + mutex_exit(&p->p_lock); } static void @@ -1650,7 +1650,7 @@ fasttrap_provider_retire(pid_t pid, const char *name, int mprov) ASSERT(strlen(name) < sizeof (fp->ftp_name)); bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)]; - dmutex_enter(&bucket->ftb_mtx); + mutex_enter(&bucket->ftb_mtx); for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) { if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 && @@ -1660,17 +1660,17 @@ fasttrap_provider_retire(pid_t pid, const char *name, int mprov) if (fp == NULL) { //printk("didnt find pid\n"); - dmutex_exit(&bucket->ftb_mtx); + mutex_exit(&bucket->ftb_mtx); return; } HERE(); - dmutex_enter(&fp->ftp_mtx); + mutex_enter(&fp->ftp_mtx); ASSERT(!mprov || fp->ftp_mcount > 0); if (mprov && --fp->ftp_mcount != 0) { HERE(); - dmutex_exit(&fp->ftp_mtx); - dmutex_exit(&bucket->ftb_mtx); + mutex_exit(&fp->ftp_mtx); + mutex_exit(&bucket->ftb_mtx); return; } HERE(); @@ -1696,7 +1696,7 @@ HERE(); fp->ftp_retired = 1; fp->ftp_marked = 1; provid = fp->ftp_provid; - dmutex_exit(&fp->ftp_mtx); + mutex_exit(&fp->ftp_mtx); /* * We don't have to worry about invalidating the same provider twice @@ -1707,7 +1707,7 @@ HERE(); dtrace_invalidate(provid); HERE(); - dmutex_exit(&bucket->ftb_mtx); + mutex_exit(&bucket->ftb_mtx); printk("%s:%d: calling pid cleanup\n", __func__, __LINE__); fasttrap_pid_cleanup(); @@ -1767,7 +1767,7 @@ fasttrap_add_probe(fasttrap_probe_spec_t *pdata) * for pending deletions when we drop this reference count. */ provider->ftp_ccount++; - dmutex_exit(&provider->ftp_mtx); + mutex_exit(&provider->ftp_mtx); /* * Grab the creation lock to ensure consistency between calls to @@ -1776,7 +1776,7 @@ fasttrap_add_probe(fasttrap_probe_spec_t *pdata) * before taking this lock to avoid a three-way deadlock with the * DTrace framework. */ - dmutex_enter(&provider->ftp_cmtx); + mutex_enter(&provider->ftp_cmtx); if (name == NULL) { HERE(); @@ -1873,7 +1873,7 @@ HERE(); pdata->ftps_mod, pdata->ftps_func, name, aframes, pp); } - dmutex_exit(&provider->ftp_cmtx); + mutex_exit(&provider->ftp_cmtx); /* * We know that the provider is still valid since we incremented the @@ -1881,10 +1881,10 @@ HERE(); * while we were using it (e.g. because the process called exec(2) or * exit(2)), take note of that and try to clean it up now. */ - dmutex_enter(&provider->ftp_mtx); + mutex_enter(&provider->ftp_mtx); provider->ftp_ccount--; whack = provider->ftp_retired; - dmutex_exit(&provider->ftp_mtx); + mutex_exit(&provider->ftp_mtx); if (whack) fasttrap_pid_cleanup(); @@ -1898,11 +1898,11 @@ HERE(); * the user has accidentally created many more probes than was * intended (e.g. pid123:::). */ - dmutex_exit(&provider->ftp_cmtx); - dmutex_enter(&provider->ftp_mtx); + mutex_exit(&provider->ftp_cmtx); + mutex_enter(&provider->ftp_mtx); provider->ftp_ccount--; provider->ftp_marked = 1; - dmutex_exit(&provider->ftp_mtx); + mutex_exit(&provider->ftp_mtx); fasttrap_pid_cleanup(); @@ -2104,7 +2104,7 @@ HERE(); provider->ftp_mcount++; HERE(); - dmutex_exit(&provider->ftp_mtx); + mutex_exit(&provider->ftp_mtx); return (provider); } @@ -2151,11 +2151,11 @@ HERE(); * dtrace_probe_lookup() and dtrace_probe_create() in the face of * other threads creating probes. 
*/ - dmutex_enter(&provider->ftp_cmtx); + mutex_enter(&provider->ftp_cmtx); if (dtrace_probe_lookup(provider->ftp_provid, dhpb->dthpb_mod, dhpb->dthpb_func, dhpb->dthpb_name) != 0) { - dmutex_exit(&provider->ftp_cmtx); + mutex_exit(&provider->ftp_cmtx); return; } @@ -2166,7 +2166,7 @@ HERE(); if (fasttrap_total > fasttrap_max) { atomic_add_32(&fasttrap_total, -ntps); - dmutex_exit(&provider->ftp_cmtx); + mutex_exit(&provider->ftp_cmtx); return; } @@ -2232,7 +2232,7 @@ HERE(); pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod, dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp); - dmutex_exit(&provider->ftp_cmtx); + mutex_exit(&provider->ftp_cmtx); } /*ARGSUSED*/ @@ -2330,25 +2330,25 @@ fasttrap_ioctl(struct file *fp, int cmd, intptr_t arg, int md, cred_t *cr, int * proc_t *p; pid_t pid = probe->ftps_pid; - dmutex_enter(&pidlock); + mutex_enter(&pidlock); /* * Report an error if the process doesn't exist * or is actively being birthed. */ if ((p = prfind(pid)) == NULL || p->p_stat == SIDL) { - dmutex_exit(&pidlock); + mutex_exit(&pidlock); return (ESRCH); } - dmutex_enter(&p->p_lock); - dmutex_exit(&pidlock); + mutex_enter(&p->p_lock); + mutex_exit(&pidlock); if ((ret = priv_proc_cred_perm(cr, p, NULL, VREAD | VWRITE)) != 0) { - dmutex_exit(&p->p_lock); + mutex_exit(&p->p_lock); return (ret); } - dmutex_exit(&p->p_lock); + mutex_exit(&p->p_lock); } ret = fasttrap_add_probe(probe); @@ -2370,30 +2370,30 @@ fasttrap_ioctl(struct file *fp, int cmd, intptr_t arg, int md, cred_t *cr, int * proc_t *p; pid_t pid = instr.ftiq_pid; - dmutex_enter(&pidlock); + mutex_enter(&pidlock); /* * Report an error if the process doesn't exist * or is actively being birthed. */ if ((p = prfind(pid)) == NULL || p->p_stat == SIDL) { - dmutex_exit(&pidlock); + mutex_exit(&pidlock); return (ESRCH); } - dmutex_enter(&p->p_lock); - dmutex_exit(&pidlock); + mutex_enter(&p->p_lock); + mutex_exit(&pidlock); if ((ret = priv_proc_cred_perm(cr, p, NULL, VREAD)) != 0) { - dmutex_exit(&p->p_lock); + mutex_exit(&p->p_lock); return (ret); } - dmutex_exit(&p->p_lock); + mutex_exit(&p->p_lock); } index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc); - dmutex_enter(&fasttrap_tpoints.fth_table[index].ftb_mtx); + mutex_enter(&fasttrap_tpoints.fth_table[index].ftb_mtx); tp = fasttrap_tpoints.fth_table[index].ftb_data; while (tp != NULL) { if (instr.ftiq_pid == tp->ftt_pid && @@ -2405,13 +2405,13 @@ fasttrap_ioctl(struct file *fp, int cmd, intptr_t arg, int md, cred_t *cr, int * } if (tp == NULL) { - dmutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx); + mutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx); return (ENOENT); } bcopy(&tp->ftt_instr, &instr.ftiq_instr, sizeof (instr.ftiq_instr)); - dmutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx); + mutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx); if (copyout(&instr, (void *)arg, sizeof (instr)) != 0) return (EFAULT); @@ -2532,7 +2532,7 @@ fasttrap_attach(void) fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent * sizeof (fasttrap_bucket_t), KM_SLEEP); for (i = 0; i < fasttrap_tpoints.fth_nent; i++) { - dmutex_init(&fasttrap_tpoints.fth_table[i].ftb_mtx); + mutex_init(&fasttrap_tpoints.fth_table[i].ftb_mtx); } /* @@ -2550,7 +2550,7 @@ fasttrap_attach(void) fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent * sizeof (fasttrap_bucket_t), KM_SLEEP); for (i = 0; i < fasttrap_provs.fth_nent; i++) { - dmutex_init(&fasttrap_provs.fth_table[i].ftb_mtx); + mutex_init(&fasttrap_provs.fth_table[i].ftb_mtx); } 
 	/*
@@ -2567,7 +2567,7 @@ HERE();
 	fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent *
 	    sizeof (fasttrap_bucket_t), KM_SLEEP);
 	for (i = 0; i < fasttrap_procs.fth_nent; i++) {
-		dmutex_init(&fasttrap_procs.fth_table[i].ftb_mtx);
+		mutex_init(&fasttrap_procs.fth_table[i].ftb_mtx);
 	}
HERE();
 
@@ -2620,7 +2620,7 @@ HERE();
 	 * to a non-zero value, and wait for the current timeout to complete.
 	 */
HERE();
-	dmutex_enter(&fasttrap_cleanup_mtx);
+	mutex_enter(&fasttrap_cleanup_mtx);
 	fasttrap_cleanup_work = 0;
HERE();
 
@@ -2629,14 +2629,14 @@ HERE();
 		fasttrap_timeout = (timeout_id_t)1;
 
 		if (tmp != 0) {
-			dmutex_exit(&fasttrap_cleanup_mtx);
+			mutex_exit(&fasttrap_cleanup_mtx);
 			(void) untimeout(tmp);
-			dmutex_enter(&fasttrap_cleanup_mtx);
+			mutex_enter(&fasttrap_cleanup_mtx);
 		}
 	}
 
 	fasttrap_cleanup_work = 0;
-	dmutex_exit(&fasttrap_cleanup_mtx);
+	mutex_exit(&fasttrap_cleanup_mtx);
HERE();
 
 	/*
@@ -2647,7 +2647,7 @@ HERE();
 		fasttrap_provider_t **fpp, *fp;
 		fasttrap_bucket_t *bucket = &fasttrap_provs.fth_table[i];
 
-		dmutex_enter(&bucket->ftb_mtx);
+		mutex_enter(&bucket->ftb_mtx);
 		fpp = (fasttrap_provider_t **)&bucket->ftb_data;
 		while ((fp = *fpp) != NULL) {
 			/*
@@ -2657,8 +2657,8 @@ HERE();
 			 * bucket lock so there's no chance of another thread
 			 * blocking on the provider's lock.
 			 */
-			dmutex_enter(&fp->ftp_mtx);
-			dmutex_exit(&fp->ftp_mtx);
+			mutex_enter(&fp->ftp_mtx);
+			mutex_exit(&fp->ftp_mtx);
 
 			if (dtrace_unregister(fp->ftp_provid) != 0) {
 				fail = 1;
@@ -2669,7 +2669,7 @@ HERE();
 			}
 		}
 
-		dmutex_exit(&bucket->ftb_mtx);
+		mutex_exit(&bucket->ftb_mtx);
 	}
HERE();
 
@@ -2680,10 +2680,10 @@ HERE();
 	 * and start a new timeout if any work has accumulated while
 	 * we've been unsuccessfully trying to detach.
 	 */
-	dmutex_enter(&fasttrap_cleanup_mtx);
+	mutex_enter(&fasttrap_cleanup_mtx);
 	fasttrap_timeout = 0;
 	work = fasttrap_cleanup_work;
-	dmutex_exit(&fasttrap_cleanup_mtx);
+	mutex_exit(&fasttrap_cleanup_mtx);
 
 	if (work)
 		fasttrap_pid_cleanup();
@@ -2695,9 +2695,9 @@ HERE();
 	}
 
 #ifdef DEBUG
-	dmutex_enter(&fasttrap_count_mtx);
+	mutex_enter(&fasttrap_count_mtx);
 	ASSERT(fasttrap_pid_count == 0);
-	dmutex_exit(&fasttrap_count_mtx);
+	mutex_exit(&fasttrap_count_mtx);
 #endif
HERE();
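
/**********************************************************************/
/* A minimal sketch of the per-bucket locking pattern the fasttrap   */
/* code above relies on: the pid/pc hash spreads contention across   */
/* many small mutexes, and a lookup copies the entry out before the  */
/* lock is dropped. All *_ex names below are illustrative only; the  */
/* real tables are fasttrap_tpoints/provs/procs, hashed with the     */
/* FASTTRAP_*_INDEX() macros, and the sketch assumes the driver's    */
/* linux_types.h environment (mutex_t, pid_t, errno values).         */
/**********************************************************************/
typedef struct tracepoint_ex {
	struct tracepoint_ex *ftt_next;
	pid_t	ftt_pid;
	uintptr_t ftt_pc;
} tracepoint_ex_t;

typedef struct bucket_ex {
	mutex_t	ftb_mtx;		/* guards the ftb_data chain */
	tracepoint_ex_t *ftb_data;
} bucket_ex_t;

int
bucket_lookup_ex(bucket_ex_t *tab, unsigned nent, pid_t pid, uintptr_t pc,
    tracepoint_ex_t *out)
{	tracepoint_ex_t *tp;
	bucket_ex_t *b = &tab[(pid ^ pc) & (nent - 1)];	/* stand-in hash */

	mutex_enter(&b->ftb_mtx);
	for (tp = b->ftb_data; tp != NULL; tp = tp->ftt_next) {
		if (tp->ftt_pid == pid && tp->ftt_pc == pc)
			break;
	}
	if (tp)
		*out = *tp;	/* copy out under the lock, the way      */
				/* fasttrap_pid_probe uses tp_local      */
	mutex_exit(&b->ftb_mtx);
	return tp ? 0 : ENOENT;
}
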
diff --git a/driver/fasttrap_isa.c b/driver/fasttrap_isa.c
index 5b69ac6..624f357 100644
--- a/driver/fasttrap_isa.c
+++ b/driver/fasttrap_isa.c
@@ -730,7 +730,7 @@ fasttrap_return_common(struct regs *rp, uintptr_t pc, pid_t pid,
 	kmutex_t *pid_mtx;
 
 	pid_mtx = &cpu_core[cpu_get_id()].cpuc_pid_lock;
-	dmutex_enter(pid_mtx);
+	mutex_enter(pid_mtx);
 	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
 
 	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
@@ -745,7 +745,7 @@ fasttrap_return_common(struct regs *rp, uintptr_t pc, pid_t pid,
 	 * is not essential to the correct execution of the process.
 	 */
 	if (tp == NULL) {
-		dmutex_exit(pid_mtx);
+		mutex_exit(pid_mtx);
 		return;
 	}
 
@@ -766,7 +766,7 @@ fasttrap_return_common(struct regs *rp, uintptr_t pc, pid_t pid,
 		    rp->r_r0, rp->r_r1, 0, 0);
 	}
 
-	dmutex_exit(pid_mtx);
+	mutex_exit(pid_mtx);
 }
 
 static void
@@ -788,9 +788,9 @@ printk("SORRY - sending SIGSEGV\n");
 	sqp->sq_info.si_code = SEGV_MAPERR;
 	sqp->sq_info.si_addr = (caddr_t)addr;
 
-	dmutex_enter(&p->p_lock);
+	mutex_enter(&p->p_lock);
 	sigaddqa(p, t, sqp);
-	dmutex_exit(&p->p_lock);
+	mutex_exit(&p->p_lock);
 
 	if (t != NULL)
 		aston(t);
@@ -1065,7 +1065,7 @@ HERE();
 	pid = p->p_pid;
 	pid_mtx = &cpu_core[cpu_get_id()].cpuc_pid_lock;
-	dmutex_enter(pid_mtx);
+	mutex_enter(pid_mtx);
 	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
//printk("probe: bucket=%p pid=%d pc=%p\n", bucket, pid, (void *) pc);
HERE();
@@ -1085,7 +1085,7 @@ HERE();
 	 * fasttrap_ioctl), or somehow we have mislaid this tracepoint.
 	 */
 	if (tp == NULL) {
-		dmutex_exit(pid_mtx);
+		mutex_exit(pid_mtx);
HERE();
 		return (-1);
 	}
@@ -1209,7 +1209,7 @@ HERE();
 	 * tracepoint again later if we need to light up any return probes.
 	 */
 	tp_local = *tp;
-	dmutex_exit(pid_mtx);
+	mutex_exit(pid_mtx);
 	tp = &tp_local;
 
 	/*
diff --git a/driver/intr_x86-64.S b/driver/intr_x86-64.S
index a24538e..7294c64 100644
--- a/driver/intr_x86-64.S
+++ b/driver/intr_x86-64.S
@@ -391,7 +391,7 @@ INTERRUPT T_DTRACE_RET, 1, 0, dtrace_int_dtrace_ret, dtrace_int_dtrace_ret_hand
 FUNCTION dtrace_int_ipi
 dtrace_int_ipi:
 	PUSH_REGS 0, 0
-	call xcall_slave
+	//call xcall_slave
 	POP_REGS 0, 0
 	INTR_RETURN 0
 
@@ -422,7 +422,7 @@ dtrace_int_nmi:
 	jz do_kernel_nmi
 
 	// For us...
 	movb $0,nmi_masks(%rax)
-	call xcall_slave
+	//call xcall_slave
 	POP_REGS 0, 0
 	INTR_RETURN 0
 
@@ -437,24 +437,6 @@ iret_instr:
 iret_addr: .quad iret_instr
 
-/**********************************************************************/
-/* Following is a hack experiment to intercept certain Xen */
-/* callbacks for IPI debugging. */
-/**********************************************************************/
-//.data
-//.global hypcnt
-//hypcnt: .quad 0
-//.text
-//	FUNCTION hypcall
-//	.p2align 5
-//hypcall:
-//	incq hypcnt
-//	PUSH_REGS 0,0
-//	call xcall_slave
-//	POP_REGS 0,0
-////	jmp 0xffffffff81666e00 // xen_hvm_callback_vector xen_evtchn_do_upcall
-//	jmp 0xffffffff81666d00 // xen_hypercall_callback
-
 /**********************************************************************/
 /* We define mcount function, so that we dont call into the */
 /* kernels mcount. If we try and probe mcount, we want to see the */
diff --git a/driver/mutex.c b/driver/mutex.c
index ec29641..30700c9 100644
--- a/driver/mutex.c
+++ b/driver/mutex.c
@@ -11,7 +11,7 @@
 /* called from probe context. */
 /* */
 /* Upper half mutexes are accessed via mutex_enter/mutex_exit. */
-/* Lower half via dmutex_enter/dmutex_exit. */
+/* Lower half now also via mutex_enter/mutex_exit. */
 /* */
 /* The same mutex may be called by either function, but must be */
 /* limited to the appropriate half of the kernel. */
@@ -20,7 +20,7 @@
 /* deadlock. mutex_enter will grab the mutex, and disable */
 /* interrupts during the grab, but re-enable after the grab. */
 /* */
-/* dmutex_enter will leave interrupts disabled. */
+/* mutex_enter no longer leaves interrupts disabled. */
 /* */
 /* We need to avoid the spin locks and semaphores/mutexes of the */
 /* kernel, because they do preemption advice, and the */
@@ -40,240 +40,23 @@
 unsigned long cnt_mtx1;
 unsigned long cnt_mtx2;
 unsigned long cnt_mtx3;
 
-static const int disable_ints;
-
-void
-dmutex_init(mutex_t *mp)
-{
-	/***********************************************/
-	/* Linux changed from MUTEX to SEMAPHORE. */
-	/* We want a very low cost mutex - mainly */
-	/* so we can avoid reentrancy issues when */
-	/* placing probes - we dont do many mutex */
-	/* operations, but they do have to work */
-	/* reliably when doing xcalls. */
-	/***********************************************/
-#if defined(DEFINE_SEMAPHORE)
-static DEFINE_SEMAPHORE(null_sema);
-#else
-static DECLARE_MUTEX(null_sema);
-#endif
-
-	memset(mp, 0, sizeof *mp);
-	mp->m_sem = null_sema;
-//	sema_init(&mp->m_sem, 1);
-	mp->m_initted = TRUE;
-}
-
-/**********************************************************************/
-/* Do the work of acquiring and blocking on a mutex. "mutex_enter" */
-/* is for normal upper-layer code, e.g. the ioctl(), whereas */
-/* "dmutex_enter" is for interrupt level code. */
-/* */
-/* We could probably coalesce the functions and use the kernel */
-/* irqs_disabled() and hard_irq_count() functions, but we dont, */
-/* for now. */
-/**********************************************************************/
-void
-mutex_enter_common(mutex_t *mp, int dflag)
-{	unsigned long flags;
-	unsigned int cnt;
-
-	if (!mp->m_initted) {
-		/***********************************************/
-		/* Special debug: detect a dynamic mutex */
-		/* being used (one coming from a kmalloc */
-		/* type block of memory), vs the statically */
-		/* defined ones). */
-		/***********************************************/
-		if (mp->m_initted != 2) {
-			dtrace_printf("initting a mutex\n");
-			dump_stack();
-		}
-		dmutex_init(mp);
-	}
-
-	/***********************************************/
-	/* Check for recursive mutex. Theres a */
-	/* number of scenarios. */
-	/* */
-	/* Non-intr followed by an intr: we have to */
-	/* allow the intr. */
-	/* */
-	/* Non-intr followed by non-intr: normal */
-	/* recursive mutex. */
-	/* */
-	/* Intr followed by an intr: shouldnt */
-	/* happen. */
-	/* */
-	/* We mustnt allow us to be put on another */
-	/* cpu, else we will lose track of which */
-	/* cpu has the mutex. */
-	/* */
-	/* Now that the mutex code is working, we */
-	/* mustnt allow recursive mutexes. This */
-	/* causes problems for two dtrace user */
-	/* space apps running at the same time. */
-	/* Turn off for now. Later on, we can */
-	/* delete the code below. */
-	/***********************************************/
-	if (0 && mp->m_count && mp->m_cpu == smp_processor_id()) {
-		static int x;
-		if (x++ < 4 || (x < 1000000 && (x % 5000) == 0))
-			dtrace_printf("%p mutex recursive, dflag=%d %d [%d]\n", mp, dflag, mp->m_type, x);
-		mp->m_level++;
-		return;
-	}
-
-	if (disable_ints && dflag)
-		flags = dtrace_interrupt_disable();
-	else
-		flags = dtrace_interrupt_get();
-
-	for (cnt = 0; dtrace_casptr(&mp->m_count, 0, (void *) 1) == (void *) 1; ) {
-		/***********************************************/
-		/* We are waiting for the lock. Someone */
-		/* else has it. Someone else might be */
-		/* waiting for us (xcall), so occasionally */
-		/* empty the xcall queue for us. */
-		/***********************************************/
-		if ((cnt++ % 100) == 0)
-			xcall_slave2();
-
-		/***********************************************/
-		/* If we are running in the upper half of */
-		/* the kernel, periodically let the */
-		/* scheduler run, to avoid deadlock when */
-		/* running N+1 copies of dtrace on an N CPU */
-		/* system. */
-		/***********************************************/
-		if (/*!dflag &&*/ (cnt % 2000) == 0)
-			schedule();
-
-		/***********************************************/
-		/* If we start locking up the kernel, let */
-		/* user know something bad is happening. */
-		/* Probably pointless if mutex is working */
-		/* correctly. */
-		/***********************************************/
-		if ((cnt % (500 * 1000 * 1000)) == 0) {
-			dtrace_printf("mutex_enter: taking a long time to grab lock mtx3=%llu\n", cnt_mtx3);
-			cnt_mtx3++;
-		}
-	}
-//preempt_disable();
-	mp->m_flags = flags;
-	mp->m_cpu = smp_processor_id();
-	mp->m_level = 1;
-	mp->m_type = dflag;
-}
-
-/**********************************************************************/
-/* Enter from interrupt context, interrupts might be disabled. */
-/**********************************************************************/
-void
-dmutex_enter(mutex_t *mp)
-{
-	cnt_mtx1++;
-	mutex_enter_common(mp, TRUE);
-}
-
 /**********************************************************************/
-/* Enter from the upper-level of the kernel, with interrupts */
-/* enabled. */
+/* Lock mutex */
 /**********************************************************************/
 void
 mutex_enter(mutex_t *mp)
 {
-	mutex_t imp = *mp;
-/*static int f;
-if (f++ == 70) {
-	int c = smp_processor_id();
-	unsigned long x = 0;
-	for (x = 0; x < 4000000000UL; x++) {
-		if (c != smp_processor_id())
-			break;
-	}
-	dtrace_printf("FIRST CPU SW: %d x=%lu\n", c, x);
-
-}*/
-	/***********************************************/
-	/* Try and detect a nested call from this */
-	/* cpu whilst the mutex is held. */
-	/***********************************************/
-	if (mp->m_count && mp->m_type && mp->m_cpu == smp_processor_id()) {
-		dtrace_printf("%p mutex...fail in mutex_enter count=%d type=%d\n", mp, mp->m_count, mp->m_type);
-	}
+	cnt_mtx1++;
 
-	cnt_mtx2++;
-	mutex_enter_common(mp, FALSE);
-	if (disable_ints && irqs_disabled()) {
-		dtrace_printf("%p: mutex_enter with irqs disabled fl:%lx level:%d cpu:%d\n",
-			mp, mp->m_flags, mp->m_level, mp->m_cpu);
-		dtrace_printf("orig: init=%d fl:%lx cpu:%d\n", imp.m_initted, imp.m_flags, imp.m_cpu);
-	}
+	mutex_lock(mp);
 }
 
-/**********************************************************************/
-/* Release mutex, called by interrupt context. */
-/**********************************************************************/
-void
-dmutex_exit(mutex_t *mp)
-{	unsigned long fl = mp->m_flags;
-
-	if (--mp->m_level)
-		return;
-
-	/*
-	if (mp->m_cpu != smp_processor_id())
-		dtrace_printf("dmutex_exit:: cross cpu %d count=%d\n", mp->m_cpu, mp->m_count);
-	*/
-
-	mp->m_count = 0;
-	if (disable_ints)
-		dtrace_interrupt_enable(fl);
-//preempt_enable_no_resched();
-}
-
 /**********************************************************************/
-/* Release mutex, called by upper-half of kernel. */
+/* Release mutex */
 /**********************************************************************/
 void
 mutex_exit(mutex_t *mp)
 {
-	if (--mp->m_level)
-		return;
-
-
-	/*
-	if (mp->m_cpu != smp_processor_id()) {
-		static int xx;
-		dtrace_printf("mutex_exit:: cross cpu %d\n", mp->m_cpu);
-		if (xx++ < 5)
-			dump_stack();
-	}
-	preempt_enable_no_resched();
-	*/
-
-	mp->m_count = 0;
-}
-
-/**********************************************************************/
-/* Used by the assertion code MUTEX_LOCKED in dtrace.c */
-/**********************************************************************/
-int
-dmutex_is_locked(mutex_t *mp)
-{
-	return mp->m_count != NULL;
-}
-
-/**********************************************************************/
-/* Utility for debugging - print the state of the mutex. */
-/**********************************************************************/
-void
-mutex_dump(mutex_t *mp)
-{
-	dtrace_printf("mutex: %p initted=%d count=%p flags=%lx cpu=%d type=%d level=%d\n",
-		mp, mp->m_initted, mp->m_count, mp->m_flags, mp->m_cpu, mp->m_type,
-		mp->m_level);
+	mutex_unlock(mp);
 }
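
/**********************************************************************/
/* What the change above amounts to in practice: mutex_enter and     */
/* mutex_exit are now thin wrappers over the kernel's mutex_lock     */
/* and mutex_unlock, and locks are declared with DEFINE_MUTEX        */
/* rather than MUTEX_DEFINE. A minimal sketch (the example_* names   */
/* are illustrative only). Kernel mutexes can sleep, so these must   */
/* only be taken from process context, never from probe context -    */
/* which is the constraint the removed dmutex layer used to absorb.  */
/**********************************************************************/
#include <linux/mutex.h>

static DEFINE_MUTEX(example_mtx);	/* was: MUTEX_DEFINE(example_mtx) */
static int example_value;

int
example_swap(int v)
{	int	old;

	mutex_enter(&example_mtx);	/* now simply mutex_lock() */
	old = example_value;
	example_value = v;
	mutex_exit(&example_mtx);	/* now simply mutex_unlock() */
	return old;
}
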
diff --git a/driver/printf.c b/driver/printf.c
index b03989a..345e30a 100644
--- a/driver/printf.c
+++ b/driver/printf.c
@@ -133,9 +133,9 @@ dtrace_vprintf(const char *fmt, va_list ap)
 	short	l_mode;
 	short	zero;
 	short	width;
-static char digits[] = "0123456789abcdef";
-	hrtime_t hrt = dtrace_gethrtime();
-static hrtime_t hrt0;
+	hrtime_t hrt;
+	static char digits[] = "0123456789abcdef";
+	static hrtime_t hrt0;
 
 # if 0
 	/***********************************************/
@@ -143,10 +143,14 @@ static hrtime_t hrt0;
 	/* to see first entries. */
 	/***********************************************/
 	if (dbuf_i >= LOG_BUFSIZ - 2048)
-		return;
+		goto exit;
 # endif
 
 	if (dtrace_printf_disable)
-		return;
+		goto exit;
+
+	preempt_disable_notrace();
+
+	hrt = dtrace_gethrtime();
 
 	/***********************************************/
 	/* Try and avoid intermingled output from */
@@ -163,7 +167,7 @@ static hrtime_t hrt0;
 	/* just for a bit more entropy. */
 	/***********************************************/
 	if (*fmt == '\0')
-		return;
+		goto exit;
 
 	dtrace_printf_lock = smp_processor_id();
 
 	/***********************************************/
@@ -322,6 +326,8 @@ static hrtime_t hrt0;
 
 	dtrace_printf_lock = -1;
 
+exit:
+	preempt_enable_no_resched_notrace();
 }
 
 /**********************************************************************/
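
/**********************************************************************/
/* The shape of the printf.c change above: a single enable site,     */
/* reached via goto, paired with a disable near the top. A sketch    */
/* of the pattern (log_disable_ex/write_buf_ex are illustrative      */
/* names, not driver symbols). One design note: the disable must     */
/* run before every branch that can reach the label - in the hunk    */
/* above the first two "goto exit" paths jump over                   */
/* preempt_disable_notrace(), which would leave the preempt count    */
/* unbalanced, so putting the disable first is the safer layout.     */
/**********************************************************************/
static int log_disable_ex;
extern void write_buf_ex(const char *, int);

void
log_line_ex(const char *msg)
{
	preempt_disable_notrace();	/* keep smp_processor_id() stable,  */
					/* without re-entering the tracer   */

	if (log_disable_ex)
		goto exit;		/* safe: disable already done above */

	write_buf_ex(msg, smp_processor_id());

exit:
	preempt_enable_no_resched_notrace();
}
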
diff --git a/driver/sdt_linux.c b/driver/sdt_linux.c
index c8cc7fe..d635847 100644
--- a/driver/sdt_linux.c
+++ b/driver/sdt_linux.c
@@ -917,16 +917,16 @@ printk("io_prov_sdt: func=%s %s:%s:%s:%s\n", namebuf, provname, modname, probena
 		/* rare, so any race conditions shouldnt */
 		/* exist to allow a re-entrancy problem. */
 		/***********************************************/
-		dmutex_exit(&dtrace_provider_lock);
+		mutex_exit(&dtrace_provider_lock);
 		if (dtrace_register(prov->sdtp_name, prov->sdtp_attr,
 		    DTRACE_PRIV_KERNEL, NULL,
 		    &sdt_pops, prov, &prov->sdtp_id) != 0) {
-			dmutex_enter(&dtrace_provider_lock);
+			mutex_enter(&dtrace_provider_lock);
 			cmn_err(CE_WARN, "failed to register sdt provider %s",
 			    prov->sdtp_name);
 			return 1;
 		}
-		dmutex_enter(&dtrace_provider_lock);
+		mutex_enter(&dtrace_provider_lock);
 	}
 
 	name = kstrdup(probename, KM_SLEEP);
diff --git a/driver/systrace.c b/driver/systrace.c
index 78034a4..dac0b7f 100644
--- a/driver/systrace.c
+++ b/driver/systrace.c
@@ -173,7 +173,7 @@ dtrace_systrace_syscall2(int syscall, systrace_sysent_t *sy,
 #	define linux_get_syscall() get_current()->thread.trap_no
 #endif
 
-MUTEX_DEFINE(slock);
+DEFINE_MUTEX(slock);
 static int do_slock;
 
 /**********************************************************************/
@@ -1378,7 +1378,7 @@ dtrace_systrace_syscall2(int syscall, systrace_sysent_t *sy,
 	/* we are debugging. */
 	/***********************************************/
 	if (do_slock) {
-		dmutex_enter(&slock);
+		mutex_enter(&slock);
 	}
 
//printk("ia32 %s before\n", syscallnames32[syscall]);
@@ -1404,12 +1404,12 @@ dtrace_systrace_syscall2(int syscall, systrace_sysent_t *sy,
 	 */
# if defined(TODOxxx)
	{proc_t *p = ttoproc(curthread);
-	dmutex_enter(&p->p_lock);
+	mutex_enter(&p->p_lock);
 	if (curthread->t_dtrace_stop && !curthread->t_lwp->lwp_nostop) {
 		curthread->t_dtrace_stop = 0;
 		stop(PR_REQUESTED, 0);
 	}
-	dmutex_exit(&p->p_lock);
+	mutex_exit(&p->p_lock);
	}
# else
	{
@@ -1595,7 +1595,7 @@ dtrace_systrace_syscall2(int syscall, systrace_sysent_t *sy,
 	}
 
 	if (do_slock) {
-		dmutex_exit(&slock);
+		mutex_exit(&slock);
 	}
 
 	return (rval);
diff --git a/driver/taskq.c b/driver/taskq.c
index 086a9b9..c648b3d 100644
--- a/driver/taskq.c
+++ b/driver/taskq.c
@@ -291,7 +291,7 @@ taskq_destroy(taskq_t *tq)
 /* executed in the context of a process which doesnt care or */
 /* couldnt be locking dtrace - avoiding deadlock. This is only */
 /* used for fasttrap garbage collection but we mustnt fire whilst */
-/* the tear down occurs, else dmutex_enter will deadlock or call */
+/* the tear down occurs, else mutex_enter will deadlock or call */
 /* schedule() from an invalid context. */
 /**********************************************************************/
 
diff --git a/driver/x_call.c b/driver/x_call.c
index 356ce02..e17ef28 100644
--- a/driver/x_call.c
+++ b/driver/x_call.c
@@ -95,13 +95,6 @@ extern int ipi_vector;
 #if NCPU > 256
 #	warning "NCPU is large - your module may not load (x_call.c)"
 #endif
-static struct xcalls {
-	dtrace_xcall_t	xc_func;
-	void		*xc_arg;
-	volatile int	xc_state;
-	} *xcalls[NCPU];
-
-static int xcall_levels[NCPU];
 
 unsigned long cnt_xcall0;
 unsigned long cnt_xcall1;
@@ -116,618 +109,13 @@ unsigned long cnt_ipi1;
 unsigned long cnt_nmi1;
 unsigned long cnt_nmi2;
 
-/**********************************************************************/
-/* Prototypes. */
-/**********************************************************************/
-void orig_dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg);
-void dtrace_xcall1(processorid_t cpu, dtrace_xcall_t func, void *arg);
-static void dump_xcalls(void);
-static void send_ipi_interrupt(cpumask_t *mask, int vector);
-void xcall_slave2(void);
-
-/**********************************************************************/
-/* Called during driver startup to do one time initialisation, so */
-/* we dont end up doing symbol lookups in potentially critical */
-/* probe paths. */
-/**********************************************************************/
-void
-xcall_init(void)
-{	int	i;
-
-	if ((x_apic = get_proc_addr("apic")) == NULL &&
-	    (x_apic = get_proc_addr("apic_ops")) == NULL) {
-		/***********************************************/
-		/* This might be a problem. It might not. */
-		/***********************************************/
-		printk("init_xcall: cannot locate 'apic'\n");
-	}
-	if (x_apic)
-		x_apic = *(void **) x_apic;
-
-	for (i = 0; i < nr_cpus; i++) {
-		xcalls[i] = kzalloc(nr_cpus * sizeof (struct xcalls), GFP_KERNEL);
-		if (xcalls[i] == NULL) {
-			dtrace_linux_panic("Cannot allocate xcalls[%d][%d] array.\n", nr_cpus, nr_cpus);
-			return;
-		}
-	}
-
-	xen_xcall_init();
-}
-void
-xcall_fini(void)
-{	int	i;
-
-	for (i = 0; i < nr_cpus; i++)
-		kfree(xcalls[i]);
-	xen_xcall_fini();
-}
-
-/**********************************************************************/
-/* Switch the interface from the orig dtrace_xcall code to the new */
-/* code. The orig code cannot come from a timer interrupt */
-/* (hr_timer code comes here). */
-/**********************************************************************/
 void
 dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
 {
-	/***********************************************/
-	/* If we arent ready for xcalls yet, then */
-	/* do it the old race-condition way. This */
-	/* will happen during/after driver load til */
-	/* we get the symbol patchups. Its not a */
-	/* problem for this scenario, and we should */
-	/* resolve the dtrace_attach() vs */
-	/* dtrace_linux_init() relative startup so */
-	/* we can kill the older code. */
-	/***********************************************/
-
-	/***********************************************/
-	/* If we are on Xen, use the kernel */
-	/* smp_call_function code - this is broken */
-	/* - we get long latencies, but is better */
-	/* than nothing. */
-	/* */
-	/* We need this for FBT tracing. */
-	/***********************************************/
-	if (!driver_initted || XCALL_MODE == XCALL_ORIG || dtrace_is_xen()) {
-		orig_dtrace_xcall(cpu, func, arg);
-		return;
-	}
-
-#if XCALL_MODE == XCALL_NEW
-	dtrace_xcall1(cpu, func, arg);
-#endif
-}
-
-/**********************************************************************/
-/* This code is similar to the Solaris original - except we rely */
-/* on smp_call_function, and that wont work from an interrupt. We */
-/* temporarily use this during driver initialisation and/or if we */
-/* decide at compile time to use this. */
-/**********************************************************************/
-void
-orig_dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
-{
-//dtrace_printf("orig_dtrace_xcall %lu\n", cnt_xcall1);
-	cnt_xcall1++;
-
-	if (cpu == DTRACE_CPUALL) {
-		cnt_xcall2++;
-		/***********************************************/
-		/* Avoid calling local_irq_disable, since */
-		/* we will likely be called from the */
-		/* hrtimer callback. */
-		/***********************************************/
-		preempt_disable();
-		SMP_CALL_FUNCTION(func, arg, TRUE);
-		func(arg);
-		preempt_enable();
-	} else {
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 26)
-		/***********************************************/
-		/* 20090710 Special case where we are */
-		/* trying to call ourselves, since */
-		/* smp_call_function_single doesnt like us */
-		/* doing this. Patch provided by */
-		/* Nicolas.Williams@sun.com */
-		/***********************************************/
-		int	me = get_cpu();
-
-		put_cpu();
-
-		if (me == cpu) {
-			local_irq_disable();
-			func(arg);
-			local_irq_enable();
-			return;
-		}
-		SMP_CALL_FUNCTION_SINGLE(cpu, func, arg, TRUE);
-#else
-		SMP_CALL_FUNCTION_SINGLE(cpu, func, arg, TRUE);
-#endif
-	}
-}
-
-# if XCALL_MODE == XCALL_NEW
-/**********************************************************************/
-/* Wait for the other cpu to get to an appropriate point. We put */
-/* in counters to avoid deadlocking ourselves - these shouldnt */
-/* happen under normal circumstances, but they can happen, e.g. if */
-/* other cpu is in a big disabled-interrupt mode. We dont like */
-/* breaking the locking protocol, but remember the xcalls are rare */
-/* - typically as dtrace is starting up or shutting down (see */
-/* /proc/dtrace/stats to see the numbers relative to actual */
-/* probes). */
-/**********************************************************************/
-int
-ack_wait(int c, int attempts)
-{
-	unsigned long cnt = 0;
-	int	cnt1 = 0;
-	volatile struct xcalls *xc = &xcalls[smp_processor_id()][c];
-
-	/***********************************************/
-	/* Avoid holding on to a stale cache line. */
-	/***********************************************/
-	while (dtrace_cas32((void *) &xc->xc_state, XC_WORKING, XC_WORKING) != XC_IDLE) {
-		if (attempts-- <= 0)
-			return 0;
-
-		barrier();
-
-		/***********************************************/
-		/* Be HT friendly. */
-		/***********************************************/
-//		smt_pause();
-
-		cnt_xcall6++;
-		/***********************************************/
-		/* Keep track of the max. */
-		/***********************************************/
-		if (cnt > cnt_xcall5)
-			cnt_xcall5 = cnt;
-
-		/***********************************************/
-		/* On my Dual Core 2.26GHz system, we are */
-		/* seeing counters in the range of hundreds */
-		/* to maybe 2,000,000 for more extreme */
-		/* cases. (This is inside a VM). During */
-		/* debugging, we found problems with the */
-		/* two cores not seeing each other -- */
-		/* possibly because I wasnt doing the right */
-		/* things to ensure memory barriers were in */
-		/* place. */
-		/* */
-		/* We dont want to wait forever because */
-		/* that will crash/hang your machine, but */
-		/* we do need to give up if its taken far */
-		/* too long. */
-		/***********************************************/
-//		if (cnt++ == 50 * 1000 * 1000UL) {
-		if (cnt++ == 1 * 1000 * 1000UL) {
-			cnt = 0;
-			cnt_xcall4++;
-
-			if (cnt1 == 0) {
-				/***********************************************/
-				/* Looks like we are having trouble getting */
-				/* the interrupt, so try for an NMI. */
-				/***********************************************/
-				cpumask_t mask;
-				cpus_clear(mask);
-				cpu_set(c, mask);
-//				nmi_masks[c] = 1;
-//				send_ipi_interrupt(&mask, 2); //NMI_VECTOR);
-			}
-
-			if (1) {
-//				set_console_on(1);
-				dtrace_printf("ack_wait cpu=%d xcall %staking too long! c=%d [xcall1=%lu]\n",
-					smp_processor_id(),
"STILL " : "", - c, cnt_xcall1); - //dump_stack(); -// set_console_on(0); - } - - if (cnt1++ > 3) { - dump_xcalls(); - dtrace_linux_panic("xcall taking too long"); - break; - } - } - } - - if (xcall_debug) { - dtrace_printf("[%d] ack_wait finished c=%d cnt=%lu (max=%lu)\n", smp_processor_id(), c, cnt, cnt_xcall5); - } - return 1; -} -/**********************************************************************/ -/* Linux version of the cpu cross call code. We need to avoid */ -/* smp_call_function() as it is not callable from an interrupt */ -/* routine. Instead, we utilise our own private interrupt vector */ -/* to send an IPI to the other cpus to validate we are synced up. */ -/* */ -/* Why do we do this? Because we dont know where the other cpus */ -/* are - traditional mutexes cannot guard against people being */ -/* inside locked regions without expensive locking protocols. */ -/* */ -/* On Solaris, the kernel supports arbitrary nested cross-calling, */ -/* but on linux, the restrictions are more severe. We attempt to */ -/* emulate what Solaris does. */ -/* */ -/* The target functions that can be called are minimalist - so we */ -/* dont need to worry about lots of deadlock complexity for the */ -/* client functions. */ -/**********************************************************************/ -void dtrace_xcall2(processorid_t cpu, dtrace_xcall_t func, void *arg); -void -dtrace_xcall1(processorid_t cpu, dtrace_xcall_t func, void *arg) -{ - - /***********************************************/ - /* Just track re-entrancy events - we will */ - /* be lockless in dtrace_xcall2. */ - /***********************************************/ - if (in_xcall >= 0 && (cnt_xcall0 < 500 || (cnt_xcall0 % 50) == 0)) { - dtrace_printf("x_call: re-entrant call in progress (%d) other=%d.\n", cnt_xcall0, in_xcall); - cnt_xcall0++; - } - in_xcall = smp_processor_id(); -//int flags = dtrace_interrupt_disable(); - dtrace_xcall2(cpu, func, arg); -//dtrace_interrupt_enable(flags); - in_xcall = -1; -} -void -dtrace_xcall2(processorid_t cpu, dtrace_xcall_t func, void *arg) -{ int c; - int cpu_id = smp_processor_id(); - int cpus_todo = 0; -# if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 24) -typedef struct cpumask cpumask_t; -//#define cpu_set(c, mask) cpumask_set_cpu(c, &(mask)) -//#define cpus_clear(mask) cpumask_clear(&mask) -# endif - cpumask_t mask; - - /***********************************************/ - /* If we had an internal panic, stop doing */ - /* xcalls. Shouldnt happen, but useful */ - /* during debugging so we can diagnose what */ - /* caused the panic. */ - /***********************************************/ - if (dtrace_shutdown) - return; - /***********************************************/ - /* Special case - just 'us'. */ - /***********************************************/ - cnt_xcall1++; - if (cpu_id == cpu) { - local_irq_disable(); -//dtrace_printf("[%d] sole cnt=%lu\n", smp_processor_id(), cnt_xcall1); - func(arg); - local_irq_enable(); - return; - } - - /***********************************************/ - /* Set up the cpu mask to do just the */ - /* relevant cpu. */ - /***********************************************/ - if (cpu != DTRACE_CPUALL) { -//dtrace_printf("just me %d %d\n", cpu_id, cpu); - cpu = 1 << cpu; - } - - -//dtrace_printf("xcall %d f=%p\n", cpu_id, func); - cnt_xcall2++; - if (xcall_levels[cpu_id]++) - cnt_xcall3++; - /***********************************************/ - /* Set up the rendezvous with the other */ - /* targetted cpus. 
-	/* NCPU*NCPU matrix to allow for any cpu to */
-	/* wait for any other. We have two slots */
-	/* per cpu - because we may be in an */
-	/* interrupt. */
-	/* */
-	/* The interrupt slave will service all */
-	/* queued calls - sometimes it will be */
-	/* lucky and see multiple, especially if we */
-	/* are heavily loaded. */
-	/***********************************************/
-	cpus_clear(mask);
-	for (c = 0; c < nr_cpus; c++) {
-		struct xcalls *xc = &xcalls[cpu_id][c];
-		unsigned int cnt;
-
-		/***********************************************/
-		/* Dont set ourselves - we dont want our */
-		/* cpu to be taking an IPI interrupt and */
-		/* doing the work twice. We inline */
-		/* ourselves below. */
-		/***********************************************/
-		if ((cpu & (1 << c)) == 0 || c == cpu_id) {
-			continue;
-		}
-
-		/***********************************************/
-		/* Is this safe? We want to avoid an IPI */
-		/* call if the other cpu is idle/not doing */
-		/* dtrace work. If thats the case and we */
-		/* are calling dtrace_sync, then we can */
-		/* avoid the xcall. */
-		/***********************************************/
-		if ((void *) func == (void *) dtrace_sync_func &&
-		    cpu_core[c].cpuc_probe_level == 0) {
-			cpu &= ~(1 << c);
-			cnt_xcall7++;
-			continue;
-		}
-//dtrace_printf("xcall %p\n", func);
-
-		xc->xc_func = func;
-		xc->xc_arg = arg;
-		/***********************************************/
-		/* Spinlock in case the interrupt hasnt */
-		/* fired. This should be very rare, and */
-		/* when it happens, we would be hanging for */
-		/* 100m iterations (about 1s). We reduce */
-		/* the chance of a hit by using the */
-		/* NCPU*NCPU*2 array approach. These things */
-		/* happen when buffers are full or user is */
-		/* ^C-ing dtrace. */
-		/***********************************************/
-		for (cnt = 0; dtrace_cas32((void *) &xc->xc_state, XC_WORKING, XC_WORKING) == XC_WORKING; cnt++) {
-			/***********************************************/
-			/* Avoid noise for tiny windows. */
-			/***********************************************/
-if(0)
-			if ((cnt == 0 && xcall_debug) || !(xcall_debug && cnt == 50)) {
-				dtrace_printf("[%d] cpu%d in wrong state (state=%d)\n",
-					smp_processor_id(), c, xc->xc_state);
-			}
-//			xcall_slave2();
-			if (cnt == 100 * 1000 * 1000) {
-				dtrace_printf("[%d] cpu%d - busting lock\n",
-					smp_processor_id(), c);
-				break;
-			}
-		}
-		if ((cnt && xcall_debug) || (!xcall_debug && cnt > 50)) {
-			dtrace_printf("[%d] cpu%d in wrong state (state=%d) %u cycles\n",
-				smp_processor_id(), c, xc->xc_state, cnt);
-		}
-		/***********************************************/
-		/* As soon as we set xc_state and BEFORE */
-		/* the apic call, the cpu may see the */
-		/* change since it may be taking an IPI */
-		/* interrupt for someone else. We need to */
-		/* be careful with barriers (I think - */
-		/* although the clflush/wmb may be */
-		/* redundant). */
-		/***********************************************/
-		xc->xc_state = XC_WORKING;
-//		clflush(&xc->xc_state);
-//		smp_wmb();
-		cpu_set(c, mask);
-		cpus_todo++;
-	}
-
-	smp_mb();
-
-	/***********************************************/
-	/* Now tell the other cpus to do some work. */
-	/***********************************************/
-	if (cpus_todo)
-		send_ipi_interrupt(&mask, ipi_vector);
-
-	/***********************************************/
-	/* Check for ourselves. */
-	/***********************************************/
-	if (cpu & (1 << cpu_id)) {
-		func(arg);
-	}
-
-	if (xcall_debug)
-		dtrace_printf("[%d] getting ready.... (%ld) mask=%x func=%p\n", smp_processor_id(), cnt_xcall1, *(int *) &mask, func);
-
-	/***********************************************/
-	/* Wait for the cpus we invoked the IPI on. */
-	/* Cycle thru the cpus, to avoid mutual */
-	/* deadlock between one cpu trying to call */
-	/* us whilst we are calling them. */
-	/***********************************************/
-	while (cpus_todo > 0) {
-		for (c = 0; c < nr_cpus && cpus_todo > 0; c++) {
-			xcall_slave2();
-			if (c == cpu_id || (cpu & (1 << c)) == 0)
-				continue;
-
-			/***********************************************/
-			/* Wait a little while for this cpu to */
-			/* respond before going on to the next one. */
-			/***********************************************/
-			if (ack_wait(c, 100)) {
-				cpus_todo--;
-				cpu &= ~(1 << c);
-			}
-		}
-break;
-	}
-//	smp_mb();
-
-	xcall_levels[cpu_id]--;
-}
-#endif
-
-/**********************************************************************/
-/* Used for debugging. */
-/**********************************************************************/
-static void
-dump_xcalls(void)
-{
-/*
-	int i;
-	for (i = 0; i < nr_cpus; i++) {
-		dtrace_printf("  cpu%d:  state=%d/%s\n", i,
-			xcalls[i].xc_state,
-			xcalls[i].xc_state == XC_IDLE ? "idle" :
-			xcalls[i].xc_state == XC_WORKING ? "work" :
-			"??");
-	}
-*/
-}
-
-/**********************************************************************/
-/* Send interrupt request to target cpus. */
-/**********************************************************************/
-static void
-send_ipi_interrupt(cpumask_t *mask, int vector)
-{
-
-	if (dtrace_is_xen()) {
-		xen_send_ipi(mask, vector);
-		return;
-	}
-# if defined(__arm__)
-	TODO();
-
-# elif LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
-	/***********************************************/
-	/* Theres 'flat' and theres 'cluster'. The */
-	/* cluster functions handle more than 8 */
-	/* cpus. The flat does not - since the APIC */
-	/* only has room for an 8-bit cpu mask. */
-	/***********************************************/
-	static void (*send_IPI_mask)(cpumask_t, int);
-	if (send_IPI_mask == NULL)
-		send_IPI_mask = get_proc_addr("cluster_send_IPI_mask");
-	if (send_IPI_mask == NULL) dtrace_printf("HELP ON send_ipi_interrupt!\n"); else
-		send_IPI_mask(*mask, vector);
-# elif LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 28)
-	/***********************************************/
-	/* Issue with GPL/inlined function. */
-	/***********************************************/
-	{
-	void send_IPI_mask_sequence(cpumask_t mask, int vector);
-	static void (*send_IPI_mask_sequence_ptr)(cpumask_t, int);
-	if (send_IPI_mask_sequence_ptr == NULL)
-		send_IPI_mask_sequence_ptr = get_proc_addr("send_IPI_mask_sequence");
-	send_IPI_mask_sequence_ptr(*mask, vector);
-	}
-# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
-	send_IPI_mask(*mask, vector);
-# else
-	if (x_apic == NULL) {
-		static void (*flat_send_IPI_mask)(cpumask_t *, int);
-		static void (*default_send_IPI_allbutself)(int);
-
-
-		if (flat_send_IPI_mask == NULL &&
-		    default_send_IPI_allbutself == NULL) {
-			flat_send_IPI_mask = get_proc_addr("flat_send_IPI_mask");
-			default_send_IPI_allbutself = get_proc_addr("default_send_IPI_allbutself");
-		}
-
-		if (flat_send_IPI_mask)  {
-			flat_send_IPI_mask(mask, vector);
-			return;
-		}
-
-		/***********************************************/
-		/* Arch Linux - 3.4 i386 */
-		/***********************************************/
-		if (default_send_IPI_allbutself) {
-			default_send_IPI_allbutself(vector);
-			return;
-		}
-
-		dtrace_linux_panic("x_apic is null - giving up\n");
-		return;
-	}
-	x_apic->send_IPI_mask(mask, vector);
-# endif
-}
-
-/**********************************************************************/
-/* Do NOT inline this function in any way. Doing so may affect */
-/* your VM - I could crash VirtualBox 4.1.18 when this function is */
-/* inlined. */
-/**********************************************************************/
-void dtrace_ack_apic(void)
-{
-# if defined(__i386) || defined(__amd64)
-	*((volatile u32 *) (APIC_BASE + APIC_EOI)) = 0;
-# endif
+	if (cpu == DTRACE_CPUALL)
+		smp_call_function(func, arg, 1);
+	else
+		smp_call_function_single(cpu, func, arg, 1);
 }
-
-/**********************************************************************/
-/* This is the IPI interrupt handler - we got invoked, so we must */
-/* have something to do. */
-/**********************************************************************/
-void
-xcall_slave(void)
-{
-	cnt_ipi1++;
-
-	xcall_slave2();
-
-	smp_mb();
-
-	/***********************************************/
-	/* We want to call ack_APIC_irq, but we */
-	/* inline the expansion because of the GPL */
-	/* symbol issue. */
-	/* Once we do this, we can have more IPI */
-	/* interrupts arrive (but not until we exit */
-	/* the interrupt routine and re-enable */
-	/* interrupts). */
-	/***********************************************/
-
-	/***********************************************/
-	/* Go direct to the assembler instruction. */
-	/* The APIC interface changed too much over */
-	/* the course of Linux kernel evolution, */
-	/* and some bits became GPL. There may be a */
-	/* price on non-standard APIC hardware, or */
-	/* paravirt kernels, but this seems to work */
-	/* for now. */
-	/***********************************************/
-	dtrace_ack_apic();
-#if 0
-	/***********************************************/
-	/* Lots of ways to ack the APIC, but they */
-	/* all have problems. */
-	/***********************************************/
-//native_apic_mem_write(APIC_EOI, 0);
-//	*((volatile u32 *) (APIC_BASE + APIC_EOI)) = 0;
-# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
-	ack_APIC_irq();
-# else
-	x_apic->write(APIC_EOI, 0);
-# endif
-#endif
-
-}
-void
-xcall_slave2(void)
-{	int	i;
-
-	/***********************************************/
-	/* Check each slot for this cpu - one from */
-	/* each of the other cpus and one for */
-	/* interrupt mode and non-interrupt mode in */
-	/* each cpu. */
-	/***********************************************/
-	for (i = 0; i < nr_cpus; i++) {
-		struct xcalls *xc = &xcalls[i][smp_processor_id()];
-		if (xc->xc_state == XC_WORKING) {
-			(*xc->xc_func)(xc->xc_arg);
-			xc->xc_state = XC_IDLE;
-		}
-	}
-}
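
/**********************************************************************/
/* The rewritten dtrace_xcall above defers to the kernel's           */
/* smp_call_function{,_single}(..., wait=1). Two caveats carried     */
/* over from the deleted comments: these helpers must not be called  */
/* from interrupt/probe context or with interrupts disabled, and     */
/* smp_call_function() runs the handler on every cpu except the      */
/* caller - orig_dtrace_xcall also invoked func(arg) locally. A      */
/* sketch of an all-cpus call that keeps the local invocation,       */
/* assuming a kernel providing on_each_cpu() (xcall_all_ex is an     */
/* illustrative name, not a driver symbol):                          */
/**********************************************************************/
#include <linux/smp.h>

void
xcall_all_ex(void (*func)(void *), void *arg)
{
	/* IPI the other cpus, run func on this one too, wait for all. */
	on_each_cpu(func, arg, 1);
}
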
diff --git a/libdtrace/dt_grammar.y b/libdtrace/dt_grammar.y
index b0741f5..0c12623 100644
--- a/libdtrace/dt_grammar.y
+++ b/libdtrace/dt_grammar.y
@@ -24,12 +24,9 @@
  * Use is subject to license terms.
  */
 
-#pragma ident	"@(#)dt_grammar.y	1.9	06/01/07 SMI"
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
 
-#define YYERROR_VERBOSE
-#include
 #include
-#include
 
 #define	OP1(op, c)	dt_node_op1(op, c)
 #define	OP2(op, l, r)	dt_node_op2(op, l, r)
@@ -48,21 +45,21 @@ }
 
 %token	DT_TOK_COMMA DT_TOK_ELLIPSIS
-%token	DT_TOK_ASGN "=" DT_TOK_ADD_EQ "+=" DT_TOK_SUB_EQ "-=" DT_TOK_MUL_EQ "*="
-%token	DT_TOK_DIV_EQ "/=" DT_TOK_MOD_EQ "%=" DT_TOK_AND_EQ "&=" DT_TOK_XOR_EQ "^=" DT_TOK_OR_EQ "|="
-%token	DT_TOK_LSH_EQ "<<=" DT_TOK_RSH_EQ ">>=" DT_TOK_QUESTION "?" DT_TOK_COLON ":"
-%token	DT_TOK_LOR "||" DT_TOK_LXOR "^^" DT_TOK_LAND "&&"
+%token	DT_TOK_ASGN DT_TOK_ADD_EQ DT_TOK_SUB_EQ DT_TOK_MUL_EQ
+%token	DT_TOK_DIV_EQ DT_TOK_MOD_EQ DT_TOK_AND_EQ DT_TOK_XOR_EQ DT_TOK_OR_EQ
+%token	DT_TOK_LSH_EQ DT_TOK_RSH_EQ DT_TOK_QUESTION DT_TOK_COLON
+%token	DT_TOK_LOR DT_TOK_LXOR DT_TOK_LAND
 %token	DT_TOK_BOR DT_TOK_XOR DT_TOK_BAND DT_TOK_EQU DT_TOK_NEQ
-%token	DT_TOK_LT "<" DT_TOK_LE "<=" DT_TOK_GT ">" DT_TOK_GE ">=" DT_TOK_LSH "<<" DT_TOK_RSH ">>"
-%token	DT_TOK_ADD "+" DT_TOK_SUB "-" DT_TOK_MUL "*" DT_TOK_DIV "/" DT_TOK_MOD "%"
-%token	DT_TOK_LNEG "!" DT_TOK_BNEG "~" DT_TOK_ADDADD "++" DT_TOK_SUBSUB "--"
+%token	DT_TOK_LT DT_TOK_LE DT_TOK_GT DT_TOK_GE DT_TOK_LSH DT_TOK_RSH
+%token	DT_TOK_ADD DT_TOK_SUB DT_TOK_MUL DT_TOK_DIV DT_TOK_MOD
+%token	DT_TOK_LNEG DT_TOK_BNEG DT_TOK_ADDADD DT_TOK_SUBSUB
 %token	DT_TOK_PREINC DT_TOK_POSTINC DT_TOK_PREDEC DT_TOK_POSTDEC
 %token	DT_TOK_IPOS DT_TOK_INEG DT_TOK_DEREF DT_TOK_ADDROF
-%token	DT_TOK_OFFSETOF "offsetof" DT_TOK_SIZEOF "sizeof" DT_TOK_STRINGOF "stringof" DT_TOK_XLATE "xlate"
-%token	DT_TOK_LPAR "(" DT_TOK_RPAR ")" DT_TOK_LBRAC "{" DT_TOK_RBRAC "}" DT_TOK_PTR "->" DT_TOK_DOT "."
+%token	DT_TOK_OFFSETOF DT_TOK_SIZEOF DT_TOK_STRINGOF DT_TOK_XLATE
+%token	DT_TOK_LPAR DT_TOK_RPAR DT_TOK_LBRAC DT_TOK_RBRAC DT_TOK_PTR DT_TOK_DOT
 
-%token	DT_TOK_STRING ""
-%token	DT_TOK_IDENT "identifier"
+%token	DT_TOK_STRING
+%token	DT_TOK_IDENT
 %token	DT_TOK_PSPEC
 %token	DT_TOK_AGG
 %token	DT_TOK_TNAME
@@ -160,12 +157,8 @@
 %type	init_declarator_list
 %type	init_declarator
 
-%type	parameter_declaration_specifiers
-
-%type	declaration_specifiers
 %type	type_specifier
 %type	type_qualifier
-%type	storage_class_specifier
 %type	struct_or_union_specifier
 %type	specifier_qualifier_list
 %type	enum_specifier
@@ -213,10 +206,9 @@
 
 %%
 
-/* Removed DT_TOK_EOF due to portability issues amongst bison/yacc */
-dtrace_program: d_expression { return (dt_node_root($1)); }
-	|	d_program { return (dt_node_root($1)); }
-	|	d_type { return (dt_node_root($1)); }
+dtrace_program: d_expression DT_TOK_EOF { return (dt_node_root($1)); }
+	|	d_program DT_TOK_EOF { return (dt_node_root($1)); }
+	|	d_type DT_TOK_EOF { return (dt_node_root($1)); }
 	;
 
 d_expression:	DT_CTX_DEXPR { $$ = NULL; }
@@ -601,21 +593,21 @@ declaration:	declaration_specifiers ';' {
 	;
 
 declaration_specifiers:
-	d_storage_class_specifier { $$ = 0; }
-	|	d_storage_class_specifier declaration_specifiers { $$ = $2; }
-	|	type_specifier
+	d_storage_class_specifier
+	|	d_storage_class_specifier declaration_specifiers
+	|	type_specifier
 	|	type_specifier declaration_specifiers
-	|	type_qualifier
+	|	type_qualifier
 	|	type_qualifier declaration_specifiers
 	;
 
 parameter_declaration_specifiers:
-	storage_class_specifier { $$ = $1; }
-	|	storage_class_specifier declaration_specifiers { $$ = $2; }
-	|	type_specifier { $$ = $1;}
-	|	type_specifier declaration_specifiers { $$ = $2; }
-	|	type_qualifier { $$ = $1; }
-	|	type_qualifier declaration_specifiers { $$ = $2;}
+	storage_class_specifier
+	|	storage_class_specifier declaration_specifiers
+	|	type_specifier
+	|	type_specifier declaration_specifiers
+	|	type_qualifier
+	|	type_qualifier declaration_specifiers
 	;
 
 storage_class_specifier:
@@ -627,7 +619,7 @@ storage_class_specifier:
 	;
 
 d_storage_class_specifier:
-	storage_class_specifier { }
+	storage_class_specifier
 	|	DT_KEY_SELF { dt_decl_class(DT_DC_SELF); }
 	|	DT_KEY_THIS { dt_decl_class(DT_DC_THIS); }
 	;
diff --git a/linux/linux_types.h b/linux/linux_types.h
index 4660f09..f88d975 100644
--- a/linux/linux_types.h
+++ b/linux/linux_types.h
@@ -104,7 +104,7 @@ struct modctl;
 #	include
 # 	endif
 
-	#define MUTEX_NOT_HELD(x)	!dtrace_mutex_is_locked(x)
+	#define MUTEX_NOT_HELD(x)	!mutex_is_locked(x)
 
 	#define PS_VM 0x00020000	/* CPU in v8086 mode */
 
@@ -265,24 +265,10 @@ typedef unsigned long long	hrtime_t;
 	/* semaphores, since we cannot use a mutex */
 	/* inside an interrupt. */
 	/***********************************************/
-	typedef struct mutex_t {
-		struct semaphore m_sem;
-		void	*m_count;
-		unsigned long m_flags;
-		int	m_cpu;
-		int	m_level;
-		int	m_initted;
-		int	m_type;
-		} mutex_t;
+	typedef struct mutex mutex_t;
 	#define kmutex_t mutex_t
 
-	#define MUTEX_DEFINE(name) mutex_t name = {.m_initted = 2 }
-	void dmutex_init(mutex_t *mp);
-	void dmutex_enter(mutex_t *mp);
-	void dmutex_exit(mutex_t *mp);
 	void mutex_enter(mutex_t *mp);
 	void mutex_exit(mutex_t *mp);
-	int dmutex_is_locked(mutex_t *mp);
-	void mutex_dump(mutex_t *mp);
 
 #	include
 #	include
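
/**********************************************************************/
/* With mutex_t now a typedef for the kernel's struct mutex, the     */
/* MUTEX_NOT_HELD assertion above reduces to !mutex_is_locked().     */
/* One caveat worth noting: mutex_is_locked() only reports that      */
/* somebody holds the mutex, not that the current thread does, so    */
/* the assertion is weaker than a Solaris-style owner check. A       */
/* short sketch (demo_* names are illustrative; ASSERT is the        */
/* driver's Solaris-compatibility macro):                            */
/**********************************************************************/
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_mtx);

void
demo_locked_region_ex(void)
{
	mutex_enter(&demo_mtx);
	ASSERT(!MUTEX_NOT_HELD(&demo_mtx));	/* i.e. mutex_is_locked() */
	mutex_exit(&demo_mtx);
}
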
diff --git a/linux/strings.h b/linux/strings.h
index 0cdd452..dacb029 100644
--- a/linux/strings.h
+++ b/linux/strings.h
@@ -4,3 +4,5 @@
 //# undef __USE_BSD /* Avoid bzero() being defined on glibc 2.7 */
 # include
+size_t
+strlcat(char *dst, const char *src, size_t len);
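
/**********************************************************************/
/* strings.h gains a strlcat prototype because glibc does not        */
/* provide the function. A minimal reference implementation with     */
/* the usual BSD semantics - append with truncation, returning the   */
/* total length it tried to create so callers can detect             */
/* truncation - offered as a sketch of what the declaration          */
/* expects:                                                          */
/**********************************************************************/
#include <string.h>

size_t
strlcat(char *dst, const char *src, size_t size)
{	size_t	dlen = strnlen(dst, size);
	size_t	slen = strlen(src);
	size_t	i;

	if (dlen == size)		/* dst not terminated within size */
		return size + slen;

	for (i = 0; src[i] != '\0' && dlen + i < size - 1; i++)
		dst[dlen + i] = src[i];
	dst[dlen + i] = '\0';

	return dlen + slen;
}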