Lines Matching defs:s_mp

84 				dr_mem_unit_t *t_mp, dr_mem_unit_t *s_mp,
93 struct memlist *t_ml, dr_mem_unit_t *s_mp,
96 dr_mem_unit_t *s_mp);
98 struct memlist *t_mlist, dr_mem_unit_t *s_mp,
429 dr_move_memory(dr_handle_t *hp, dr_mem_unit_t *s_mp, dr_mem_unit_t *t_mp)
442 s_mp->sbm_cm.sbdev_path,
445 ASSERT(s_mp->sbm_flags & DR_MFLAG_SOURCE);
446 ASSERT(s_mp->sbm_peer == t_mp);
447 ASSERT(s_mp->sbm_mlist);
450 ASSERT(t_mp->sbm_peer == s_mp);
455 * the full source board memlist. s_mp->sbm_del_mlist
459 c_ml = memlist_dup(s_mp->sbm_mlist);
460 d_ml = s_mp->sbm_del_mlist;
486 t_mp->sbm_cm.sbdev_id, s_mp->sbm_cm.sbdev_id, c_ml, &cr_id);
488 DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err);
518 s_mp->sbm_cm.sbdev_error = hp->h_err;
537 s_bp = s_mp->sbm_cm.sbdev_bp;
552 DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err);
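The copy list at line 459 is built by duplicating the full source memlist. For reference, a minimal user-land sketch of a memlist duplicator, assuming the illumos struct memlist layout (ml_address/ml_size/ml_next/ml_prev); the in-kernel version allocates from kernel memory but copies the same fields:

#include <stdint.h>
#include <stdlib.h>

struct memlist {
	uint64_t	ml_address;	/* base physical address of span */
	uint64_t	ml_size;	/* span length in bytes */
	struct memlist	*ml_next;
	struct memlist	*ml_prev;
};

/* Duplicate a memlist chain; returns NULL on empty input or allocation failure. */
static struct memlist *
memlist_dup_sketch(const struct memlist *mlist)
{
	struct memlist *head = NULL, *tail = NULL;

	for (; mlist != NULL; mlist = mlist->ml_next) {
		struct memlist *mp = calloc(1, sizeof (*mp));
		if (mp == NULL) {
			while (head != NULL) {	/* free the partial copy */
				struct memlist *next = head->ml_next;
				free(head);
				head = next;
			}
			return (NULL);
		}
		mp->ml_address = mlist->ml_address;
		mp->ml_size = mlist->ml_size;
		mp->ml_prev = tail;
		if (tail == NULL)
			head = mp;
		else
			tail->ml_next = mp;
		tail = mp;
	}
	return (head);
}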
567 dr_mem_unit_t *s_mp = (dr_mem_unit_t *)cp;
575 if (s_mp->sbm_flags & DR_MFLAG_SOURCE) {
576 t_mp = s_mp->sbm_peer;
578 ASSERT(t_mp->sbm_peer == s_mp);
584 state = s_mp->sbm_cm.sbdev_state;
586 dr_dev_err(CE_IGNORE, &s_mp->sbm_cm, ESBD_STATE);
610 err = drmach_mem_disable(s_mp->sbm_cm.sbdev_id);
612 DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err);
616 rv = dr_move_memory(hp, s_mp, t_mp);
620 s_mp->sbm_cm.sbdev_bp->b_num,
624 (void) dr_cancel_mem(s_mp);
631 err = drmach_unconfigure(s_mp->sbm_cm.sbdev_id, 0);
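Lines 610-631 trace the detach sequence for a source unit: quiesce the source memory controller, copy-rename to the target, back out on failure, then unconfigure. A standalone restatement of that control flow; the *_stub functions below are placeholders for the real drmach/dr calls, not their actual signatures:

/* Stubs standing in for the platform layer; 0 means success. */
static int drmach_mem_disable_stub(void *id) { (void)id; return (0); }
static int dr_move_memory_stub(void *s, void *t) { (void)s; (void)t; return (0); }
static void dr_cancel_mem_stub(void *s) { (void)s; }
static int drmach_unconfigure_stub(void *id) { (void)id; return (0); }

/*
 * Shape of the detach path: disable the source memory controller,
 * copy to the target, restore deleted spans on failure, and finally
 * unconfigure the source board's memory.
 */
static int
detach_mem_sketch(void *s_id, void *s_unit, void *t_unit)
{
	if (drmach_mem_disable_stub(s_id) != 0)
		return (-1);			/* record error, bail out */

	if (dr_move_memory_stub(s_unit, t_unit) != 0) {
		dr_cancel_mem_stub(s_unit);	/* back out the release */
		return (-1);
	}

	return (drmach_unconfigure_stub(s_id));
}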
1119 dr_process_excess_mlist(dr_mem_unit_t *s_mp,
1137 * Remove s_mp->sbm_del_mlist from
1140 for (ml = s_mp->sbm_del_mlist; ml;
1147 s_mp->sbm_cm.sbdev_id,
1150 DRERR_SET_C(&s_mp->
1229 dr_add_memory_spans(s_mp, &ml0);
1232 dr_add_memory_spans(s_mp, &ml0);
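The loop at line 1140 strips the spans in sbm_del_mlist out of another memlist. A minimal sketch of that span subtraction on a simplified singly-linked memlist (the real lists also carry ml_prev); the list is assumed sorted and non-overlapping:

#include <stdint.h>
#include <stdlib.h>

struct memlist {
	uint64_t	ml_address;
	uint64_t	ml_size;
	struct memlist	*ml_next;
};

/*
 * Remove [addr, addr + size) from the sorted, non-overlapping list at
 * *headp.  A span wholly inside an existing entry splits it in two.
 */
static int
memlist_del_span_sketch(struct memlist **headp, uint64_t addr, uint64_t size)
{
	uint64_t end = addr + size;
	struct memlist **pp = headp;

	while (*pp != NULL) {
		struct memlist *ml = *pp;
		uint64_t ml_end = ml->ml_address + ml->ml_size;

		if (ml_end <= addr || ml->ml_address >= end) {
			pp = &ml->ml_next;		/* no overlap */
			continue;
		}
		if (ml->ml_address < addr && ml_end > end) {
			/* deletion splits this entry: keep both remainders */
			struct memlist *tail = malloc(sizeof (*tail));
			if (tail == NULL)
				return (-1);
			tail->ml_address = end;
			tail->ml_size = ml_end - end;
			tail->ml_next = ml->ml_next;
			ml->ml_size = addr - ml->ml_address;
			ml->ml_next = tail;
			return (0);
		}
		if (ml->ml_address >= addr && ml_end <= end) {
			*pp = ml->ml_next;		/* entry fully deleted */
			free(ml);
			continue;
		}
		if (ml->ml_address < addr) {
			ml->ml_size = addr - ml->ml_address;	/* trim tail */
		} else {
			ml->ml_size = ml_end - end;		/* trim head */
			ml->ml_address = end;
		}
		pp = &ml->ml_next;
	}
	return (0);
}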
1240 dr_post_detach_mem_unit(dr_mem_unit_t *s_mp)
1242 uint64_t sz = s_mp->sbm_slice_size;
1258 /* s_mp->sbm_del_mlist could be NULL, meaning no deleted spans */
1260 f, s_mp->sbm_cm.sbdev_path);
1261 PR_MEMLIST_DUMP(s_mp->sbm_del_mlist);
1264 ASSERT(s_mp->sbm_del_mlist == NULL ||
1265 (s_mp->sbm_flags & DR_MFLAG_RELDONE) != 0);
1267 if (s_mp->sbm_flags & DR_MFLAG_SOURCE) {
1268 t_mp = s_mp->sbm_peer;
1271 ASSERT(t_mp->sbm_peer == s_mp);
1290 if (s_mp->sbm_flags & DR_MFLAG_RELDONE) {
1291 x_mp = s_mp;
1309 if (s_mp->sbm_cm.sbdev_error) {
1311 s_mp->sbm_cm.sbdev_path, s_mp->sbm_flags);
1312 DR_DEV_CLR_UNREFERENCED(&s_mp->sbm_cm);
1313 DR_DEV_CLR_RELEASED(&s_mp->sbm_cm);
1314 dr_device_transition(&s_mp->sbm_cm, DR_STATE_CONFIGURED);
1319 s_mp->sbm_cm.sbdev_path, s_mp->sbm_flags);
1328 s_old_basepa = _ptob64(s_mp->sbm_basepfn);
1329 err = drmach_mem_get_info(s_mp->sbm_cm.sbdev_id, &minfo);
1351 s_copy_mlist = memlist_dup(s_mp->sbm_mlist);
1352 for (ml = s_mp->sbm_del_mlist; ml; ml = ml->ml_next) {
1380 for (ml = s_mp->sbm_mlist; ml; ml = ml->ml_next) {
1386 PR_MEMLIST_DUMP(s_mp->sbm_mlist);
1388 PR_MEMLIST_DUMP(s_mp->sbm_dyn_segs);
1397 t_mp->sbm_dyn_segs = s_mp->sbm_dyn_segs;
1398 s_mp->sbm_dyn_segs = NULL;
1418 for (ml = s_mp->sbm_del_mlist; ml; ml = ml->ml_next) {
1433 err = dr_process_excess_mlist(s_mp, t_mp,
1442 * s_mp->sbm_del_mlist may still be needed
1447 PR_MEMLIST_DUMP(s_mp->sbm_del_mlist);
1467 for (ml = s_mp->sbm_del_mlist; !s_excess_mem_deleted && ml;
1472 err = drmach_mem_del_span(s_mp->sbm_cm.sbdev_id,
1475 DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err);
1481 err = drmach_mem_del_span(s_mp->sbm_cm.sbdev_id,
1484 DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err);
1515 * words, when s_mp->sbm_cm.sbdev_error is NULL). This memlist is
1523 if (s_mp->sbm_del_mlist && s_mp->sbm_del_mlist != s_mp->sbm_mlist)
1524 memlist_delete(s_mp->sbm_del_mlist);
1526 if (s_mp->sbm_cm.sbdev_error && s_mp->sbm_mlist) {
1527 memlist_delete(s_mp->sbm_mlist);
1528 s_mp->sbm_mlist = NULL;
1531 if (s_mp->sbm_dyn_segs != NULL && s_mp->sbm_cm.sbdev_error == 0) {
1532 memlist_delete(s_mp->sbm_dyn_segs);
1533 s_mp->sbm_dyn_segs = NULL;
1536 s_mp->sbm_del_mlist = NULL;
1537 s_mp->sbm_peer = NULL;
1538 s_mp->sbm_flags = 0;
1539 s_mp->sbm_cm.sbdev_busy = 0;
1540 dr_init_mem_unit_data(s_mp);
1542 PR_MEM("%s: cached memlist for %s:", f, s_mp->sbm_cm.sbdev_path);
1543 PR_MEMLIST_DUMP(s_mp->sbm_mlist);
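Note the guard at line 1523: sbm_del_mlist can alias sbm_mlist (when the whole unit was deleted), so it is freed only when the two pointers differ. A compressed sketch of that teardown discipline on a simplified unit struct (the field set here is assumed, not the full dr_mem_unit_t):

#include <stdint.h>
#include <stdlib.h>

struct memlist {
	uint64_t	ml_address;
	uint64_t	ml_size;
	struct memlist	*ml_next;
};

static void
memlist_delete_sketch(struct memlist *ml)
{
	while (ml != NULL) {
		struct memlist *next = ml->ml_next;
		free(ml);
		ml = next;
	}
}

struct mem_unit_sketch {
	struct memlist	*sbm_mlist;	/* full unit memlist */
	struct memlist	*sbm_del_mlist;	/* deleted spans; may alias sbm_mlist */
	struct memlist	*sbm_dyn_segs;
	int		sbm_flags;
	int		sbm_busy;
};

/* Free bookkeeping lists, avoiding a double free when the lists alias. */
static void
mem_unit_teardown_sketch(struct mem_unit_sketch *mp, int had_error)
{
	if (mp->sbm_del_mlist != NULL && mp->sbm_del_mlist != mp->sbm_mlist)
		memlist_delete_sketch(mp->sbm_del_mlist);

	if (had_error && mp->sbm_mlist != NULL) {
		memlist_delete_sketch(mp->sbm_mlist);
		mp->sbm_mlist = NULL;	/* keep the cached list only on success */
	}
	if (!had_error && mp->sbm_dyn_segs != NULL) {
		memlist_delete_sketch(mp->sbm_dyn_segs);
		mp->sbm_dyn_segs = NULL;
	}
	mp->sbm_del_mlist = NULL;
	mp->sbm_flags = 0;
	mp->sbm_busy = 0;
}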
1730 dr_mem_unit_t *s_mp = (dr_mem_unit_t *)cp;
1739 if (s_mp->sbm_flags & DR_MFLAG_SOURCE) {
1740 t_mp = s_mp->sbm_peer;
1742 ASSERT(t_mp->sbm_peer == s_mp);
1751 ASSERT(s_mp->sbm_flags & DR_MFLAG_RELOWNER);
1752 ASSERT(s_mp->sbm_flags & DR_MFLAG_RESERVED);
1753 rv = kphysm_del_release(s_mp->sbm_memhandle);
1762 s_mp->sbm_flags &= ~DR_MFLAG_RELOWNER;
1769 if (s_mp->sbm_cm.sbdev_error != NULL) {
1785 if (s_mp->sbm_del_mlist != s_mp->sbm_mlist)
1786 memlist_delete(s_mp->sbm_del_mlist);
1787 s_mp->sbm_del_mlist = NULL;
1789 if (s_mp->sbm_mlist != NULL) {
1790 memlist_delete(s_mp->sbm_mlist);
1791 s_mp->sbm_mlist = NULL;
1794 s_mp->sbm_peer = NULL;
1795 s_mp->sbm_flags = 0;
1796 s_mp->sbm_cm.sbdev_busy = 0;
1802 DR_DEV_SET_RELEASED(&s_mp->sbm_cm);
1803 dr_device_transition(&s_mp->sbm_cm, DR_STATE_RELEASE);
1832 if (s_mp->sbm_del_mlist != NULL) {
1833 mp = s_mp;
1849 DR_DEV_INTERNAL_ERROR(&s_mp->sbm_cm);
1853 s_mp->sbm_flags |= DR_MFLAG_RELDONE;
1858 if (dr_release_dev_done(&s_mp->sbm_cm) != 0) {
1865 f, s_mp->sbm_cm.sbdev_path);
1867 s_mp->sbm_cm.sbdev_ostate = SBD_STAT_UNCONFIGURED;
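Lines 1751-1853 show the release flag discipline: only the RELOWNER releases the kphysm handle, ownership is dropped regardless of the outcome, and RELDONE is set once the deleted spans are accounted for. A sketch with assumed flag values (the real DR_MFLAG_* constants may differ):

#include <assert.h>

/* Flag bits mirroring the DR_MFLAG_* names in these matches (values assumed). */
enum {
	MFLAG_RELOWNER	= 0x04,	/* unit owns a kphysm delete handle */
	MFLAG_RESERVED	= 0x08,	/* spans reserved against the handle */
	MFLAG_RELDONE	= 0x10	/* release completed */
};

/*
 * Release-done bookkeeping: assert ownership, drop RELOWNER whether or
 * not the release succeeded, and mark RELDONE only on success.
 */
static int
release_done_sketch(int *flagsp, int release_rv)
{
	assert(*flagsp & MFLAG_RELOWNER);
	assert(*flagsp & MFLAG_RESERVED);

	*flagsp &= ~MFLAG_RELOWNER;	/* handle is gone either way */
	if (release_rv != 0)
		return (-1);		/* caller records an error */

	*flagsp |= MFLAG_RELDONE;
	return (0);
}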
1923 dr_cancel_mem(dr_mem_unit_t *s_mp)
1929 state = s_mp->sbm_cm.sbdev_state;
1931 if (s_mp->sbm_flags & DR_MFLAG_TARGET) {
1935 } else if (s_mp->sbm_flags & DR_MFLAG_SOURCE) {
1936 t_mp = s_mp->sbm_peer;
1938 ASSERT(t_mp->sbm_peer == s_mp);
1950 ASSERT((s_mp->sbm_flags & DR_MFLAG_RELOWNER) == 0);
1960 if (s_mp->sbm_del_mlist != NULL) {
1962 f, s_mp->sbm_cm.sbdev_path);
1964 dr_add_memory_spans(s_mp, s_mp->sbm_del_mlist);
1972 ASSERT((s_mp->sbm_flags & DR_MFLAG_RELOWNER) == 0);
1994 if (s_mp->sbm_del_mlist != s_mp->sbm_mlist)
1995 memlist_delete(s_mp->sbm_del_mlist);
1996 s_mp->sbm_del_mlist = NULL;
1998 if (s_mp->sbm_mlist != NULL) {
1999 memlist_delete(s_mp->sbm_mlist);
2000 s_mp->sbm_mlist = NULL;
2003 s_mp->sbm_peer = NULL;
2004 s_mp->sbm_flags = 0;
2005 s_mp->sbm_cm.sbdev_busy = 0;
2006 dr_init_mem_unit_data(s_mp);
2012 f, (int)state, s_mp->sbm_cm.sbdev_path);
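Cancel re-adds any deleted spans (line 1964). The real path goes through the platform add-memory interface; the sketch below shows only the list-side operation of inserting a span back into a sorted memlist with coalescing of adjacent entries:

#include <stdint.h>
#include <stdlib.h>

struct memlist {
	uint64_t	ml_address;
	uint64_t	ml_size;
	struct memlist	*ml_next;
};

/* Insert [addr, addr + size) into a sorted list, merging adjacent spans. */
static int
memlist_add_span_sketch(struct memlist **headp, uint64_t addr, uint64_t size)
{
	struct memlist **pp = headp;
	struct memlist *mp;

	while (*pp != NULL && (*pp)->ml_address + (*pp)->ml_size < addr)
		pp = &(*pp)->ml_next;

	if (*pp != NULL && (*pp)->ml_address <= addr + size) {
		/* overlaps or abuts: extend the existing entry */
		mp = *pp;
		if (addr < mp->ml_address) {
			mp->ml_size += mp->ml_address - addr;
			mp->ml_address = addr;
		}
		if (addr + size > mp->ml_address + mp->ml_size)
			mp->ml_size = addr + size - mp->ml_address;
		/* merge any successor the extension now touches */
		while (mp->ml_next != NULL &&
		    mp->ml_next->ml_address <= mp->ml_address + mp->ml_size) {
			struct memlist *next = mp->ml_next;
			uint64_t next_end = next->ml_address + next->ml_size;
			if (next_end > mp->ml_address + mp->ml_size)
				mp->ml_size = next_end - mp->ml_address;
			mp->ml_next = next->ml_next;
			free(next);
		}
		return (0);
	}

	mp = malloc(sizeof (*mp));
	if (mp == NULL)
		return (-1);
	mp->ml_address = addr;
	mp->ml_size = size;
	mp->ml_next = *pp;
	*pp = mp;
	return (0);
}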
2138 dr_mem_unit_t *s_mp, struct memlist *s_ml)
2162 ASSERT(s_mp->sbm_npages != 0);
2167 rv = kphysm_del_span_query(s_mp->sbm_basepfn, s_mp->sbm_npages, &s_mq);
2171 f, s_mp->sbm_cm.sbdev_path, rv, s_mp->sbm_basepfn,
2172 s_mp->sbm_npages);
2180 s_mp->sbm_cm.sbdev_path, s_mq.first_nonrelocatable,
2186 for (ml = s_mp->sbm_dyn_segs; ml; ml = ml->ml_next) {
2218 if (s_mp == t_mp) {
2241 preference = dr_get_target_preference(hp, t_mp, s_mp,
2360 x_ml = dr_get_copy_mlist(s_ml, t_ml, s_mp, t_mp);
2372 s_mp->sbm_cm.sbdev_path);
2376 if (dr_reserve_mem_spans(&s_mp->sbm_memhandle,
2388 s_mp->sbm_flags |= DR_MFLAG_RESERVED;
2398 if (dr_reserve_mem_spans(&s_mp->sbm_memhandle, t_ml) == 0) {
2417 * (s_mp->sbm_memhandle) is kphysm_del_release'd.
2420 s_mp->sbm_flags &= ~DR_MFLAG_RESERVED;
2436 f, s_mp->sbm_cm.sbdev_path);
2447 s_mp->sbm_cm.sbdev_path);
2449 s_mp->sbm_peer = c_mp;
2450 s_mp->sbm_flags |= DR_MFLAG_SOURCE;
2451 s_mp->sbm_del_mlist = d_ml; /* spans to be deleted, if any */
2452 s_mp->sbm_mlist = s_ml;
2453 s_mp->sbm_cm.sbdev_busy = 1;
2455 c_mp->sbm_peer = s_mp;
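Lines 2449-2455 wire the chosen candidate to the source; the ASSERTs throughout this file (for example at lines 446 and 450) check exactly this cross-linking. A sketch on a simplified unit struct (flag values assumed):

#include <stddef.h>

struct memlist;

struct mem_unit_sketch {
	struct mem_unit_sketch	*sbm_peer;
	struct memlist		*sbm_mlist;
	struct memlist		*sbm_del_mlist;
	int			sbm_flags;
	int			sbm_busy;
};

enum { MFLAG_SOURCE = 0x01, MFLAG_TARGET = 0x02 };	/* values assumed */

/*
 * Cross-link a source unit and its chosen copy target so that either
 * can be found from the other during cancel or post-detach.
 */
static void
link_source_target_sketch(struct mem_unit_sketch *s_mp,
    struct mem_unit_sketch *c_mp, struct memlist *s_ml, struct memlist *d_ml)
{
	s_mp->sbm_peer = c_mp;
	s_mp->sbm_flags |= MFLAG_SOURCE;
	s_mp->sbm_del_mlist = d_ml;	/* spans to be deleted, if any */
	s_mp->sbm_mlist = s_ml;
	s_mp->sbm_busy = 1;

	c_mp->sbm_peer = s_mp;
	c_mp->sbm_flags |= MFLAG_TARGET;
	c_mp->sbm_busy = 1;
}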
2473 dr_mem_unit_t *t_mp, dr_mem_unit_t *s_mp,
2487 if (dr_memlist_canfit(s_ml, t_ml, s_mp, t_mp)) {
2488 if (s_mp->sbm_npages == t_mp->sbm_npages)
2497 s_nonreloc_ml = dr_get_nonreloc_mlist(b_ml, s_mp);
2502 if (dr_memlist_canfit(s_nonreloc_ml, t_ml, s_mp, t_mp))
2535 dr_mem_unit_t *s_mp, dr_mem_unit_t *t_mp)
2544 ASSERT(t_mp->sbm_slice_size == s_mp->sbm_slice_size);
2546 s_slice_mask = s_mp->sbm_slice_size - 1;
2597 for (dyn = s_mp->sbm_dyn_segs; dyn != NULL;
2639 dr_get_nonreloc_mlist(struct memlist *s_ml, dr_mem_unit_t *s_mp)
2646 PR_MEMLIST_DUMP(s_mp->sbm_dyn_segs);
2684 for (dyn = s_mp->sbm_dyn_segs; dyn != NULL;
2704 PR_MEM("%s: %s: edited source memlist:\n", f, s_mp->sbm_cm.sbdev_path);
2716 dr_mem_unit_t *s_mp, dr_mem_unit_t *t_mp)
2731 s_slice_mask = s_mp->sbm_slice_size - 1;
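Slice sizes are powers of two, so sbm_slice_size - 1 at lines 2546 and 2731 is a mask that yields an address's offset within its slice. A simplified sketch of the kind of slice-relative containment test dr_memlist_canfit performs; the real function's rules are more involved, this shows only the offset arithmetic:

#include <stdint.h>

struct memlist {
	uint64_t	ml_address;
	uint64_t	ml_size;
	struct memlist	*ml_next;
};

/*
 * Check that every source span, taken at its offset within the memory
 * slice, lies inside some target span at the same slice offset.
 */
static int
memlist_canfit_sketch(const struct memlist *s_ml,
    const struct memlist *t_ml, uint64_t slice_size)
{
	uint64_t slice_mask = slice_size - 1;	/* power-of-two mask */
	const struct memlist *s, *t;

	for (s = s_ml; s != NULL; s = s->ml_next) {
		uint64_t s_off = s->ml_address & slice_mask;
		int fits = 0;

		for (t = t_ml; t != NULL; t = t->ml_next) {
			uint64_t t_off = t->ml_address & slice_mask;

			if (t_off <= s_off &&
			    s_off + s->ml_size <= t_off + t->ml_size) {
				fits = 1;
				break;
			}
		}
		if (!fits)
			return (0);
	}
	return (1);
}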