Lines Matching defs:tep
246 * tep structure, so it should never be freed.
1140 tl_refhold(tl_endpt_t *tep)
1142 atomic_inc_32(&tep->te_refcnt);
1146 tl_refrele(tl_endpt_t *tep)
1148 ASSERT(tep->te_refcnt != 0);
1150 if (atomic_dec_32_nv(&tep->te_refcnt) == 0)
1151 tl_free(tep);
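The two fragments above (tl_refhold at 1140, tl_refrele at 1146) show the endpoint's plain atomic reference count: the last release frees the object. Below is a minimal userland sketch of the same pattern, with C11 stdatomic standing in for the kernel's atomic_inc_32()/atomic_dec_32_nv(); the obj_* names are illustrative, not from tl.c.

#include <stdatomic.h>
#include <stdlib.h>
#include <assert.h>

/* Illustrative analogue of te_refcnt handling in tl_refhold()/tl_refrele(). */
typedef struct obj {
	atomic_uint	refcnt;
	/* ... payload ... */
} obj_t;

static void
obj_hold(obj_t *op)
{
	atomic_fetch_add(&op->refcnt, 1);	/* like tl_refhold() */
}

static void
obj_rele(obj_t *op)
{
	assert(atomic_load(&op->refcnt) != 0);
	/* fetch_sub returns the old value, so 1 means the count just hit zero */
	if (atomic_fetch_sub(&op->refcnt, 1) == 1)
		free(op);			/* like tl_free(tep) */
}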
1158 tl_endpt_t *tep = buf;
1160 bzero(tep, sizeof (tl_endpt_t));
1161 mutex_init(&tep->te_closelock, NULL, MUTEX_DEFAULT, NULL);
1162 cv_init(&tep->te_closecv, NULL, CV_DEFAULT, NULL);
1163 mutex_init(&tep->te_srv_lock, NULL, MUTEX_DEFAULT, NULL);
1164 cv_init(&tep->te_srv_cv, NULL, CV_DEFAULT, NULL);
1165 mutex_init(&tep->te_ser_lock, NULL, MUTEX_DEFAULT, NULL);
1174 tl_endpt_t *tep = buf;
1176 mutex_destroy(&tep->te_closelock);
1177 cv_destroy(&tep->te_closecv);
1178 mutex_destroy(&tep->te_srv_lock);
1179 cv_destroy(&tep->te_srv_cv);
1180 mutex_destroy(&tep->te_ser_lock);
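tl_constructor (1158-1165) and tl_destructor (1174-1180) are the usual kmem-cache constructor/destructor pair: the locks and condition variables are set up once per cached buffer and torn down only when the buffer leaves the cache. The kmem_cache_create() call itself is outside the matched lines, so the wiring below is only an assumed sketch of that registration, not a quote from tl.c.

#include <sys/kmem.h>

/* Assumed setup: register the constructor/destructor with the object cache. */
static kmem_cache_t *tl_cache;

static void
tl_cache_init(void)
{
	tl_cache = kmem_cache_create("tl_cache", sizeof (tl_endpt_t), 0,
	    tl_constructor, tl_destructor, NULL, NULL, NULL, 0);
}

/*
 * Buffers from kmem_cache_alloc(tl_cache, KM_SLEEP) then arrive with their
 * locks already initialized; kmem_cache_free() returns them to the cache
 * without destroying those primitives.
 */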
1184 tl_free(tl_endpt_t *tep)
1186 ASSERT(tep->te_refcnt == 0);
1187 ASSERT(tep->te_transport != NULL);
1188 ASSERT(tep->te_rq == NULL);
1189 ASSERT(tep->te_wq == NULL);
1190 ASSERT(tep->te_ser != NULL);
1191 ASSERT(tep->te_ser_count == 0);
1192 ASSERT(! (tep->te_flag & TL_ADDRHASHED));
1194 if (IS_SOCKET(tep)) {
1195 ASSERT(tep->te_alen == TL_SOUX_ADDRLEN);
1196 ASSERT(tep->te_abuf == &tep->te_uxaddr);
1197 ASSERT(tep->te_vp == (void *)(uintptr_t)tep->te_minor);
1198 ASSERT(tep->te_magic == SOU_MAGIC_IMPLICIT);
1199 } else if (tep->te_abuf != NULL) {
1200 kmem_free(tep->te_abuf, tep->te_alen);
1201 tep->te_alen = -1; /* uninitialized */
1202 tep->te_abuf = NULL;
1204 ASSERT(tep->te_alen == -1);
1207 id_free(tl_minors, tep->te_minor);
1208 ASSERT(tep->te_credp == NULL);
1210 if (tep->te_hash_hndl != NULL)
1211 mod_hash_cancel(tep->te_addrhash, &tep->te_hash_hndl);
1213 if (IS_COTS(tep)) {
1214 TL_REMOVE_PEER(tep->te_conp);
1215 TL_REMOVE_PEER(tep->te_oconp);
1216 tl_serializer_refrele(tep->te_ser);
1217 tep->te_ser = NULL;
1218 ASSERT(tep->te_nicon == 0);
1219 ASSERT(list_head(&tep->te_iconp) == NULL);
1221 ASSERT(tep->te_lastep == NULL);
1222 ASSERT(list_head(&tep->te_flowlist) == NULL);
1223 ASSERT(tep->te_flowq == NULL);
1226 ASSERT(tep->te_bufcid == 0);
1227 ASSERT(tep->te_timoutid == 0);
1228 bzero(&tep->te_ap, sizeof (tep->te_ap));
1229 tep->te_acceptor_id = 0;
1231 ASSERT(tep->te_closewait == 0);
1232 ASSERT(!tep->te_rsrv_active);
1233 ASSERT(!tep->te_wsrv_active);
1234 tep->te_closing = 0;
1235 tep->te_nowsrv = B_FALSE;
1236 tep->te_flag = 0;
1238 kmem_cache_free(tl_cache, tep);
1285 tl_serializer_enter(tl_endpt_t *tep, tlproc_t tlproc, mblk_t *mp)
1287 if (IS_COTS(tep)) {
1288 mutex_enter(&tep->te_ser_lock);
1289 tep->te_ser_count++;
1290 mutex_exit(&tep->te_ser_lock);
1292 serializer_enter(tep->te_ser, (srproc_t *)tlproc, mp, tep);
1300 tl_serializer_exit(tl_endpt_t *tep)
1302 if (IS_COTS(tep)) {
1303 mutex_enter(&tep->te_ser_lock);
1304 ASSERT(tep->te_ser_count != 0);
1305 tep->te_ser_count--;
1306 mutex_exit(&tep->te_ser_lock);
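For COTS endpoints, tl_serializer_enter (1285) and tl_serializer_exit (1300) bracket each serialized job with the te_ser_count counter under te_ser_lock, so the close path can tell whether the serializer still has work in flight for this endpoint. A userland sketch of that bookkeeping, with pthreads in place of the kernel mutex (the ep_* names are illustrative):

#include <pthread.h>
#include <assert.h>

typedef struct ep {
	pthread_mutex_t	ser_lock;	/* like te_ser_lock */
	unsigned	ser_count;	/* like te_ser_count */
} ep_t;

/* Count one in-flight serialized job before handing it to the serializer. */
static void
ep_serializer_enter(ep_t *ep)
{
	pthread_mutex_lock(&ep->ser_lock);
	ep->ser_count++;
	pthread_mutex_unlock(&ep->ser_lock);
	/* ... queue the job on the serializer here ... */
}

/* Each serialized job drops the count when it finishes. */
static void
ep_serializer_exit(ep_t *ep)
{
	pthread_mutex_lock(&ep->ser_lock);
	assert(ep->ser_count != 0);
	ep->ser_count--;
	pthread_mutex_unlock(&ep->ser_lock);
}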
1387 tl_noclose(tl_endpt_t *tep)
1391 mutex_enter(&tep->te_closelock);
1392 if (! tep->te_closing) {
1393 ASSERT(tep->te_closewait == 0);
1394 tep->te_closewait++;
1397 mutex_exit(&tep->te_closelock);
1405 tl_closeok(tl_endpt_t *tep)
1407 ASSERT(tep->te_closewait > 0);
1408 mutex_enter(&tep->te_closelock);
1409 ASSERT(tep->te_closewait == 1);
1410 tep->te_closewait--;
1411 cv_signal(&tep->te_closecv);
1412 mutex_exit(&tep->te_closelock);
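tl_noclose (1387) and tl_closeok (1405) form a small close barrier: a caller that must keep the endpoint open takes te_closelock, checks te_closing, and bumps te_closewait; when done it drops the hold and signals te_closecv so a blocked close (see 1553-1556) can continue. A userland analogue of that handshake, with assumed ep_* names:

#include <pthread.h>
#include <stdbool.h>
#include <assert.h>

typedef struct ep {
	pthread_mutex_t	closelock;	/* like te_closelock */
	pthread_cond_t	closecv;	/* like te_closecv */
	bool		closing;	/* like te_closing */
	int		closewait;	/* like te_closewait */
} ep_t;

/* Like tl_noclose(): succeed only if no close is in progress. */
static bool
ep_noclose(ep_t *ep)
{
	bool ok = false;

	pthread_mutex_lock(&ep->closelock);
	if (!ep->closing) {
		assert(ep->closewait == 0);
		ep->closewait++;
		ok = true;
	}
	pthread_mutex_unlock(&ep->closelock);
	return (ok);
}

/* Like tl_closeok(): drop the hold and wake a waiting close. */
static void
ep_closeok(ep_t *ep)
{
	pthread_mutex_lock(&ep->closelock);
	assert(ep->closewait == 1);
	ep->closewait--;
	pthread_cond_signal(&ep->closecv);
	pthread_mutex_unlock(&ep->closelock);
}

/* Like the close path: wait until every ep_noclose() hold is released. */
static void
ep_close_wait(ep_t *ep)
{
	pthread_mutex_lock(&ep->closelock);
	while (ep->closewait != 0)
		pthread_cond_wait(&ep->closecv, &ep->closelock);
	ep->closing = true;
	pthread_mutex_unlock(&ep->closelock);
}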
1422 tl_endpt_t *tep;
1443 tep = kmem_cache_alloc(tl_cache, KM_SLEEP);
1444 tep->te_refcnt = 1;
1445 tep->te_cpid = curproc->p_pid;
1446 rq->q_ptr = WR(rq)->q_ptr = tep;
1447 tep->te_state = TS_UNBND;
1448 tep->te_credp = credp;
1450 tep->te_zoneid = getzoneid();
1452 tep->te_flag = minor & TL_MINOR_MASK;
1453 tep->te_transport = &tl_transports[minor];
1456 tep->te_minor = (minor_t)id_alloc(tl_minors);
1459 (void) mod_hash_reserve(tep->te_addrhash, &tep->te_hash_hndl);
1462 if (IS_COTS(tep)) {
1464 tep->te_ser = tl_serializer_alloc(KM_SLEEP);
1467 list_create(&tep->te_iconp, sizeof (tl_icon_t),
1469 tep->te_qlen = 0;
1470 tep->te_nicon = 0;
1471 tep->te_oconp = NULL;
1472 tep->te_conp = NULL;
1475 tep->te_ser = tep->te_transport->tr_serializer;
1476 bzero(&tep->te_flows, sizeof (list_node_t));
1478 list_create(&tep->te_flowlist, sizeof (tl_endpt_t),
1480 tep->te_flowq = NULL;
1481 tep->te_lastep = NULL;
1486 if (IS_SOCKET(tep)) {
1488 tep->te_alen = TL_SOUX_ADDRLEN;
1489 tep->te_abuf = &tep->te_uxaddr;
1490 tep->te_vp = (void *)(uintptr_t)tep->te_minor;
1491 tep->te_magic = SOU_MAGIC_IMPLICIT;
1493 tep->te_alen = -1;
1494 tep->te_abuf = NULL;
1498 *devp = makedevice(getmajor(*devp), tep->te_minor);
1500 tep->te_rq = rq;
1501 tep->te_wq = WR(rq);
1504 if (IS_SOCKET(tep))
1505 tep->te_acceptor_id = tep->te_minor;
1507 tep->te_acceptor_id = (t_uscalar_t)rq;
1509 tep->te_acceptor_id = tep->te_minor;
1519 (void) mod_hash_insert(tep->te_transport->tr_ai_hash,
1520 (mod_hash_key_t)(uintptr_t)tep->te_acceptor_id,
1521 (mod_hash_val_t)tep);
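After open, the new endpoint is registered in its transport's acceptor-id hash keyed by te_acceptor_id; the matching mod_hash_remove() appears in the close path at 1540, and T_CONN_RES handling looks the id up again around 3433. A hedged sketch of that insert/lookup/remove pairing using the same mod_hash calls; the helper name and the use of plain mod_hash_find() (the driver itself uses a callback variant) are assumptions:

#include <sys/modhash.h>

/* Sketch only: acceptor-id registration and the reverse lookup. */
static void
ai_hash_example(mod_hash_t *ai_hash, tl_endpt_t *tep)
{
	mod_hash_val_t val;
	mod_hash_key_t key = (mod_hash_key_t)(uintptr_t)tep->te_acceptor_id;

	/* register: acceptor id -> endpoint, as done right after open */
	(void) mod_hash_insert(ai_hash, key, (mod_hash_val_t)tep);

	/* later (e.g. while validating a T_CONN_RES) map the id back to a tep */
	if (mod_hash_find(ai_hash, key, &val) == 0)
		tep = (tl_endpt_t *)val;

	/* and on close, drop the mapping again */
	(void) mod_hash_remove(ai_hash, key, &val);
}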
1530 tl_endpt_t *tep = (tl_endpt_t *)rq->q_ptr;
1532 queue_t *wq = tep->te_wq;
1540 rc = mod_hash_remove(tep->te_transport->tr_ai_hash,
1541 (mod_hash_key_t)(uintptr_t)tep->te_acceptor_id,
1543 ASSERT(rc == 0 && tep == elp);
1544 if ((rc != 0) || (tep != elp)) {
1545 (void) (STRLOG(TL_ID, tep->te_minor, 1,
1553 mutex_enter(&tep->te_closelock);
1554 while (tep->te_closewait)
1555 cv_wait(&tep->te_closecv, &tep->te_closelock);
1556 tep->te_closing = B_TRUE;
1561 tep->te_closewait = 1;
1562 tep->te_nowsrv = B_FALSE;
1563 mutex_exit(&tep->te_closelock);
1569 tl_serializer_enter(tep, tl_close_ser, &tep->te_closemp);
1574 mutex_enter(&tep->te_closelock);
1575 while (tep->te_closewait)
1576 cv_wait(&tep->te_closecv, &tep->te_closelock);
1577 mutex_exit(&tep->te_closelock);
1581 if (tep->te_bufcid) {
1582 qunbufcall(rq, tep->te_bufcid);
1583 tep->te_bufcid = 0;
1585 if (tep->te_timoutid) {
1586 (void) quntimeout(rq, tep->te_timoutid);
1587 tep->te_timoutid = 0;
1597 * For a COTS endpoint, wait before destroying tep since the serializer
1598 * may go away together with tep and we need to destroy the serializer
1601 ASSERT(tep->te_closewait == 0);
1602 if (IS_COTS(tep))
1603 tep->te_closewait = 1;
1605 tl_refhold(tep);
1607 tl_serializer_enter(tep, tl_close_finish_ser, &tep->te_closemp);
1613 if (IS_COTS(tep)) {
1614 mutex_enter(&tep->te_closelock);
1615 while (tep->te_closewait)
1616 cv_wait(&tep->te_closecv, &tep->te_closelock);
1617 mutex_exit(&tep->te_closelock);
1620 crfree(tep->te_credp);
1621 tep->te_credp = NULL;
1622 tep->te_wq = NULL;
1623 tl_refrele(tep);
1625 * tep is likely to be destroyed now, so we can't reference it any more.
1640 tl_close_ser(mblk_t *mp, tl_endpt_t *tep)
1642 ASSERT(tep->te_closing);
1643 ASSERT(tep->te_closewait == 1);
1644 ASSERT(!(tep->te_flag & TL_CLOSE_SER));
1646 tep->te_flag |= TL_CLOSE_SER;
1652 if (tep->te_wq->q_first && (IS_CLTS(tep) || IS_COTSORD(tep))) {
1653 tl_wsrv_ser(NULL, tep);
1657 tl_addr_unbind(tep);
1663 if (IS_COTS(tep) && !IS_SOCKET(tep)) {
1664 tl_endpt_t *peer_tep = tep->te_conp;
1666 tep->te_wq->q_next = NULL;
1671 tep->te_rq = NULL;
1674 tl_closeok(tep);
1675 tl_serializer_exit(tep);
1686 tl_close_finish_ser(mblk_t *mp, tl_endpt_t *tep)
1688 ASSERT(tep->te_closing);
1689 IMPLY(IS_CLTS(tep), tep->te_closewait == 0);
1690 IMPLY(IS_COTS(tep), tep->te_closewait == 1);
1692 tep->te_state = -1; /* Uninitialized */
1693 if (IS_COTS(tep)) {
1694 tl_co_unconnect(tep);
1697 TL_REMOVE_PEER(tep->te_lastep);
1702 tl_cl_backenable(tep);
1703 if (tep->te_flowq != NULL) {
1704 list_remove(&(tep->te_flowq->te_flowlist), tep);
1705 tep->te_flowq = NULL;
1709 tl_serializer_exit(tep);
1710 if (IS_COTS(tep))
1711 tl_closeok(tep);
1713 tl_refrele(tep);
1725 tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;
1733 if (IS_CLTS(tep)) {
1734 (void) (STRLOG(TL_ID, tep->te_minor, 1,
1778 (void) (STRLOG(TL_ID, tep->te_minor, 1,
1805 if (IS_CLTS(tep)) {
1818 if (IS_COTS(tep) ||
1823 if ((tep->te_state == TS_IDLE) && !wq->q_first) {
1849 (void) (STRLOG(TL_ID, tep->te_minor, 1,
1857 tl_capability_req(mp, tep);
1867 (void) (STRLOG(TL_ID, tep->te_minor, 1,
1875 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
1885 tl_refhold(tep);
1886 tl_serializer_enter(tep, tl_proc, mp);
1893 tl_putq_ser(mblk_t *mp, tl_endpt_t *tep)
1895 if (tep->te_closing) {
1896 tl_wput_ser(mp, tep);
1898 TL_PUTQ(tep, mp);
1899 tl_serializer_exit(tep);
1900 tl_refrele(tep);
1906 tl_wput_common_ser(mblk_t *mp, tl_endpt_t *tep)
1912 tl_data(mp, tep);
1915 tl_do_proto(mp, tep);
1927 tl_wput_ser(mblk_t *mp, tl_endpt_t *tep)
1929 tl_wput_common_ser(mp, tep);
1930 tl_serializer_exit(tep);
1931 tl_refrele(tep);
1938 tl_wput_data_ser(mblk_t *mp, tl_endpt_t *tep)
1940 tl_endpt_t *peer_tep = tep->te_conp;
1944 ASSERT(IS_COTS(tep));
1946 IMPLY(peer_tep, tep->te_ser == peer_tep->te_ser);
1949 * fastpath for data. Ignore flow control if tep is closing.
1953 ((tep->te_state == TS_DATA_XFER) ||
1954 (tep->te_state == TS_WREQ_ORDREL)) &&
1955 (tep->te_wq != NULL) &&
1956 (tep->te_wq->q_first == NULL) &&
1960 (canputnext(peer_rq) || tep->te_closing)) {
1962 } else if (tep->te_closing) {
1964 * It is possible that by the time we got here tep started to
1969 if ((tep->te_wq != NULL) &&
1970 ((tep->te_state == TS_DATA_XFER) ||
1971 (tep->te_state == TS_WREQ_ORDREL))) {
1972 TL_PUTQ(tep, mp);
1977 TL_PUTQ(tep, mp);
1980 tl_serializer_exit(tep);
1981 tl_refrele(tep);
1995 tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;
1997 while ((wq->q_first != NULL) && !tep->te_nowsrv) {
1998 mutex_enter(&tep->te_srv_lock);
1999 ASSERT(tep->te_wsrv_active == B_FALSE);
2000 tep->te_wsrv_active = B_TRUE;
2001 mutex_exit(&tep->te_srv_lock);
2003 tl_serializer_enter(tep, tl_wsrv_ser, &tep->te_wsrvmp);
2008 mutex_enter(&tep->te_srv_lock);
2009 while (tep->te_wsrv_active) {
2010 cv_wait(&tep->te_srv_cv, &tep->te_srv_lock);
2012 cv_signal(&tep->te_srv_cv);
2013 mutex_exit(&tep->te_srv_lock);
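tl_wsrv (1995-2013) marks the endpoint busy under te_srv_lock, hands the queued work to the serializer, and then blocks on te_srv_cv until tl_wsrv_ser clears te_wsrv_active (2046-2050), so the STREAMS service routine does not return while serialized processing is still draining the queue. A userland sketch of that wait-for-completion handshake, with pthreads in place of the kernel cv (ep_* names are illustrative):

#include <pthread.h>
#include <stdbool.h>

typedef struct ep {
	pthread_mutex_t	srv_lock;	/* like te_srv_lock */
	pthread_cond_t	srv_cv;		/* like te_srv_cv */
	bool		wsrv_active;	/* like te_wsrv_active */
} ep_t;

/* Service side: mark active, dispatch, then wait for the worker to finish. */
static void
ep_wsrv(ep_t *ep)
{
	pthread_mutex_lock(&ep->srv_lock);
	ep->wsrv_active = true;
	pthread_mutex_unlock(&ep->srv_lock);

	/* ... hand the queued work to the serializer here ... */

	pthread_mutex_lock(&ep->srv_lock);
	while (ep->wsrv_active)
		pthread_cond_wait(&ep->srv_cv, &ep->srv_lock);
	pthread_mutex_unlock(&ep->srv_lock);
}

/* Worker side: clear the flag and wake the waiter when done. */
static void
ep_wsrv_done(ep_t *ep)
{
	pthread_mutex_lock(&ep->srv_lock);
	ep->wsrv_active = false;
	pthread_cond_signal(&ep->srv_cv);
	pthread_mutex_unlock(&ep->srv_lock);
}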
2023 tl_wsrv_ser(mblk_t *ser_mp, tl_endpt_t *tep)
2026 queue_t *wq = tep->te_wq;
2029 while (!tep->te_nowsrv && (mp = getq(wq)) != NULL) {
2030 tl_wput_common_ser(mp, tep);
2046 mutex_enter(&tep->te_srv_lock);
2047 ASSERT(tep->te_wsrv_active);
2048 tep->te_wsrv_active = B_FALSE;
2049 cv_signal(&tep->te_srv_cv);
2050 mutex_exit(&tep->te_srv_lock);
2051 tl_serializer_exit(tep);
2057 * flow controlled by tep.
2066 tl_endpt_t *tep = (tl_endpt_t *)rq->q_ptr;
2069 ASSERT(tep->te_rsrv_active == 0);
2071 tep->te_rsrv_active = B_TRUE;
2072 tl_serializer_enter(tep, tl_rsrv_ser, &tep->te_rsrvmp);
2076 mutex_enter(&tep->te_srv_lock);
2077 while (tep->te_rsrv_active) {
2078 cv_wait(&tep->te_srv_cv, &tep->te_srv_lock);
2080 cv_signal(&tep->te_srv_cv);
2081 mutex_exit(&tep->te_srv_lock);
2086 tl_rsrv_ser(mblk_t *mp, tl_endpt_t *tep)
2090 if (IS_CLTS(tep) && tep->te_state == TS_IDLE) {
2091 tl_cl_backenable(tep);
2093 IS_COTS(tep) &&
2094 ((peer_tep = tep->te_conp) != NULL) &&
2096 ((tep->te_state == TS_DATA_XFER) ||
2097 (tep->te_state == TS_WIND_ORDREL) ||
2098 (tep->te_state == TS_WREQ_ORDREL))) {
2105 mutex_enter(&tep->te_srv_lock);
2106 ASSERT(tep->te_rsrv_active);
2107 tep->te_rsrv_active = B_FALSE;
2108 cv_signal(&tep->te_srv_cv);
2109 mutex_exit(&tep->te_srv_lock);
2110 tl_serializer_exit(tep);
2117 tl_do_proto(mblk_t *mp, tl_endpt_t *tep)
2127 tl_unbind(mp, tep);
2131 tl_addr_req(mp, tep);
2136 if (IS_CLTS(tep)) {
2137 tl_merror(tep->te_wq, mp, EPROTO);
2140 tl_conn_res(mp, tep);
2144 if (IS_CLTS(tep)) {
2145 tl_merror(tep->te_wq, mp, EPROTO);
2148 tl_discon_req(mp, tep);
2152 if (IS_CLTS(tep)) {
2153 tl_merror(tep->te_wq, mp, EPROTO);
2156 tl_data(mp, tep);
2160 if (IS_CLTS(tep)) {
2161 tl_merror(tep->te_wq, mp, EPROTO);
2164 tl_data(mp, tep);
2168 if (IS_CLTS(tep)) {
2169 tl_merror(tep->te_wq, mp, EPROTO);
2172 tl_exdata(mp, tep);
2176 if (! IS_COTSORD(tep)) {
2177 tl_merror(tep->te_wq, mp, EPROTO);
2180 tl_ordrel(mp, tep);
2184 if (IS_COTS(tep)) {
2185 tl_merror(tep->te_wq, mp, EPROTO);
2188 tl_unitdata(mp, tep);
2192 tl_merror(tep->te_wq, mp, EPROTO);
2202 tl_do_ioctl_ser(mblk_t *mp, tl_endpt_t *tep)
2204 if (! tep->te_closing)
2205 tl_do_ioctl(mp, tep);
2209 tl_serializer_exit(tep);
2210 tl_refrele(tep);
2214 tl_do_ioctl(mblk_t *mp, tl_endpt_t *tep)
2218 queue_t *wq = tep->te_wq;
2239 if (IS_SOCKET(tep) || (tep->te_flag & otheropt)) {
2259 tep->te_flag |= thisopt;
2261 tep->te_flag &= ~thisopt;
2335 tl_bind_ser(mblk_t *mp, tl_endpt_t *tep)
2337 if (! tep->te_closing)
2338 tl_bind(mp, tep);
2342 tl_serializer_exit(tep);
2343 tl_refrele(tep);
2351 tl_bind(mblk_t *mp, tl_endpt_t *tep)
2353 queue_t *wq = tep->te_wq;
2365 t_scalar_t save_state = tep->te_state;
2367 if (tep->te_state != TS_UNBND) {
2368 (void) (STRLOG(TL_ID, tep->te_minor, 1,
2371 tep->te_state));
2381 tep->te_state = NEXTSTATE(TE_BIND_REQ, tep->te_state);
2390 if (IS_COTS(tep)) {
2400 if ((tep->te_hash_hndl == NULL) &&
2401 ((tep->te_flag & TL_ADDRHASHED) == 0) &&
2402 mod_hash_reserve_nosleep(tep->te_addrhash,
2403 &tep->te_hash_hndl) != 0) {
2411 if (IS_SOCKET(tep)) {
2417 (void) (STRLOG(TL_ID, tep->te_minor,
2420 tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
2431 (void) (STRLOG(TL_ID, tep->te_minor,
2434 tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
2440 (void) (STRLOG(TL_ID, tep->te_minor,
2443 tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
2449 (void) (STRLOG(TL_ID, tep->te_minor,
2452 tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
2460 (void) (STRLOG(TL_ID, tep->te_minor,
2463 tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
2468 (void) (STRLOG(TL_ID, tep->te_minor,
2471 tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
2483 (void) (STRLOG(TL_ID, tep->te_minor,
2494 if (IS_SOCKET(tep)) {
2504 if (tep->te_flag & TL_ADDRHASHED) {
2505 ASSERT(IS_COTS(tep) && tep->te_qlen == 0);
2506 if (tep->te_vp == ux_addr.soua_vp)
2509 tl_addr_unbind(tep);
2516 rc = mod_hash_insert_reserve(tep->te_addrhash,
2518 (mod_hash_val_t)tep, tep->te_hash_hndl);
2527 (void) (STRLOG(TL_ID, tep->te_minor, 1,
2534 tep->te_uxaddr = ux_addr;
2535 tep->te_flag |= TL_ADDRHASHED;
2536 tep->te_hash_hndl = NULL;
2542 if (! tl_get_any_addr(tep, NULL)) {
2543 (void) (STRLOG(TL_ID, tep->te_minor,
2553 addr_req.ta_zoneid = tep->te_zoneid;
2555 tep->te_abuf = kmem_zalloc((size_t)alen, KM_NOSLEEP);
2556 if (tep->te_abuf == NULL) {
2560 bcopy(addr_req.ta_abuf, tep->te_abuf, addr_req.ta_alen);
2561 tep->te_alen = alen;
2563 if (mod_hash_insert_reserve(tep->te_addrhash,
2564 (mod_hash_key_t)&tep->te_ap, (mod_hash_val_t)tep,
2565 tep->te_hash_hndl) != 0) {
2572 (void) (STRLOG(TL_ID, tep->te_minor, 1,
2583 if (! tl_get_any_addr(tep, &addr_req)) {
2584 (void) (STRLOG(TL_ID, tep->te_minor, 1,
2591 tep->te_flag |= TL_ADDRHASHED;
2592 tep->te_hash_hndl = NULL;
2596 ASSERT(tep->te_alen >= 0);
2602 basize = sizeof (struct T_bind_ack) + tep->te_alen;
2605 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
2610 tl_addr_unbind(tep);
2611 tep->te_state = TS_UNBND;
2621 b_ack->ADDR_length = tep->te_alen;
2624 bcopy(tep->te_abuf, addr_startp, tep->te_alen);
2626 if (IS_COTS(tep)) {
2627 tep->te_qlen = qlen;
2629 tep->te_flag |= TL_LISTENER;
2632 tep->te_state = NEXTSTATE(TE_BIND_ACK, tep->te_state);
2645 tep->te_state = save_state;
2649 tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
2658 tl_unbind(mblk_t *mp, tl_endpt_t *tep)
2663 if (tep->te_closing) {
2668 wq = tep->te_wq;
2687 if (tep->te_state != TS_IDLE) {
2688 (void) (STRLOG(TL_ID, tep->te_minor, 1,
2691 tep->te_state));
2695 tep->te_state = NEXTSTATE(TE_UNBIND_REQ, tep->te_state);
2704 if (! IS_SOCKET(tep) || !IS_CLTS(tep) || tep->te_qlen != 0 ||
2705 tep->te_magic != SOU_MAGIC_EXPLICIT) {
2712 tl_addr_unbind(tep);
2715 tep->te_state = NEXTSTATE(TE_OK_ACK1, tep->te_state);
2737 tl_endpt_t *tep;
2742 tep = (tl_endpt_t *)wq->q_ptr;
2760 if (!IS_SOCKET(tep) && tep->te_state != TS_IDLE &&
2767 (void) (STRLOG(TL_ID, tep->te_minor, 1,
2770 tep->te_state));
2806 tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;
2817 ASSERT(IS_COTS(tep));
2819 if (tep->te_closing) {
2840 if (tep->te_state != TS_IDLE) {
2841 (void) (STRLOG(TL_ID, tep->te_minor, 1,
2844 tep->te_state));
2856 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
2869 if (IS_SOCKET(tep)) {
2874 (void) (STRLOG(TL_ID, tep->te_minor,
2884 (void) (STRLOG(TL_ID, tep->te_minor,
2896 (void) (STRLOG(TL_ID, tep->te_minor, 1,
2906 (void) (STRLOG(TL_ID, tep->te_minor, 1,
2922 (void) (STRLOG(TL_ID, tep->te_minor, 3,
2932 (void) (STRLOG(TL_ID, tep->te_minor, 1,
2943 * Prevent tep from closing on us.
2945 if (! tl_noclose(tep)) {
2946 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
2953 tep->te_state = NEXTSTATE(TE_CONN_REQ, tep->te_state);
2961 dst.ta_zoneid = tep->te_zoneid;
2966 peer_tep = (IS_SOCKET(tep) ?
2967 tl_sock_find_peer(tep, &ux_addr) :
2968 tl_find_peer(tep, &dst));
2971 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
2979 (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE,
2991 tep->te_state = NEXTSTATE(TE_OK_ACK1, tep->te_state);
2992 tl_ok_ack(tep->te_wq, ackmp, T_CONN_REQ);
2993 tl_closeok(tep);
3004 tep->te_state = TS_IDLE;
3008 putnext(tep->te_rq, dimp);
3020 tl_serializer_refrele(tep->te_ser);
3021 tep->te_ser = peer_tep->te_ser;
3022 ASSERT(tep->te_oconp == NULL);
3023 tep->te_oconp = peer_tep;
3028 tl_closeok(tep);
3037 tl_refhold(tep);
3038 tl_serializer_enter(tep, tl_conn_req_ser, mp);
3045 tl_conn_req_ser(mblk_t *mp, tl_endpt_t *tep)
3048 tl_endpt_t *peer_tep = tep->te_oconp;
3063 if (tep->te_closing) {
3064 TL_UNCONNECT(tep->te_oconp);
3065 tl_serializer_exit(tep);
3066 tl_refrele(tep);
3071 wq = tep->te_wq;
3072 tep->te_flag |= TL_EAGER;
3086 (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE | SL_ERROR,
3089 TL_UNCONNECT(tep->te_oconp);
3092 tl_serializer_exit(tep);
3093 tl_refrele(tep);
3118 ci_msz = sizeof (struct T_conn_ind) + tep->te_alen;
3131 tep->te_state = TS_IDLE;
3134 TL_UNCONNECT(tep->te_oconp);
3135 tl_serializer_exit(tep);
3136 tl_refrele(tep);
3143 if (IS_SOCKET(tep) && !tl_disable_early_connect) {
3155 tep->te_state = TS_IDLE;
3160 TL_UNCONNECT(tep->te_oconp);
3161 tl_serializer_exit(tep);
3162 tl_refrele(tep);
3174 tep->te_state = TS_IDLE;
3180 TL_UNCONNECT(tep->te_oconp);
3181 tl_serializer_exit(tep);
3182 tl_refrele(tep);
3191 tep->te_state = TS_IDLE;
3197 TL_UNCONNECT(tep->te_oconp);
3198 tl_serializer_exit(tep);
3199 tl_refrele(tep);
3212 tep->te_state = NEXTSTATE(TE_OK_ACK1, tep->te_state);
3230 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
3233 TL_UNCONNECT(tep->te_oconp);
3234 tl_serializer_exit(tep);
3235 tl_refrele(tep);
3248 ci->SRC_length = tep->te_alen;
3249 ci->SEQ_number = tep->te_seqno;
3252 bcopy(tep->te_abuf, addr_startp, tep->te_alen);
3278 * increment references for both peer_tep and tep: peer_tep is placed on
3279 * te_oconp and tep is placed on the listener's queue.
3281 tip->ti_tep = tep;
3282 tip->ti_seqno = tep->te_seqno;
3297 tep->te_state = NEXTSTATE(TE_CONN_CON, tep->te_state);
3299 putnext(tep->te_rq, confmp);
3302 * Now we need to increment tep reference because tep is referenced by
3307 ASSERT(tep->te_refcnt >= 2);
3309 tl_serializer_exit(tep);
3325 tl_conn_res(mblk_t *mp, tl_endpt_t *tep)
3343 ASSERT(IS_COTS(tep));
3345 if (tep->te_closing) {
3350 wq = tep->te_wq;
3374 if (tep->te_state != TS_WRES_CIND) {
3375 (void) (STRLOG(TL_ID, tep->te_minor, 1,
3378 tep->te_state));
3390 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
3399 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
3410 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
3417 tep->te_state = NEXTSTATE(TE_CONN_RES, tep->te_state);
3418 ASSERT(tep->te_state == TS_WACK_CRES);
3422 (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR,
3424 tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
3433 if (mod_hash_find_cb(tep->te_transport->tr_ai_hash,
3436 (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR,
3438 tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
3448 (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR,
3450 tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
3464 if ((tep != acc_ep) && (acc_ep->te_state != TS_IDLE)) {
3465 (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR,
3468 tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
3481 if ((tep == acc_ep) && (tep->te_nicon > 1)) {
3482 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
3484 tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
3498 tip = tl_icon_find(tep, cres->SEQ_number);
3500 (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR,
3502 tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
3573 (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE,
3603 tep->te_state = TS_WRES_CIND;
3618 if (tep->te_nicon == 1) {
3619 if (tep == acc_ep)
3620 tep->te_state = NEXTSTATE(TE_OK_ACK2, tep->te_state);
3622 tep->te_state = NEXTSTATE(TE_OK_ACK3, tep->te_state);
3624 tep->te_state = NEXTSTATE(TE_OK_ACK4, tep->te_state);
3638 (void) (STRLOG(TL_ID, tep->te_minor, 3,
3660 tep->te_state = TS_IDLE;
3675 if (tep != acc_ep)
3690 tl_freetip(tep, tip);
3718 (void) (STRLOG(TL_ID, tep->te_minor, 3,
3773 ASSERT(cl_ep->te_ser == tep->te_ser);
3808 tl_freetip(tep, tip);
3818 if (! IS_SOCKET(tep)) {
3857 tl_discon_req(mblk_t *mp, tl_endpt_t *tep)
3862 tl_endpt_t *peer_tep = tep->te_conp;
3863 tl_endpt_t *srv_tep = tep->te_oconp;
3870 if (tep->te_closing) {
3876 TL_UNCONNECT(tep->te_conp);
3880 TL_UNCONNECT(tep->te_oconp);
3884 wq = tep->te_wq;
3908 save_state = new_state = tep->te_state;
3911 (void) (STRLOG(TL_ID, tep->te_minor, 1,
3914 tep->te_state));
3923 new_state = NEXTSTATE(TE_DISCON_REQ, tep->te_state);
3927 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
3929 tep->te_state = NEXTSTATE(TE_ERROR_ACK, new_state);
3939 if (tep->te_nicon > 0) { /* server */
3944 tip = tl_icon_find(tep, dr->SEQ_number);
3946 (void) (STRLOG(TL_ID, tep->te_minor, 2,
3949 tep->te_state = NEXTSTATE(TE_ERROR_ACK, new_state);
3978 if (tep->te_nicon == 0)
3981 if (tep->te_nicon == 1)
3989 if ((tep->te_nicon <= 1) &&
4001 if (tep->te_nicon > 0) { /* listener */
4004 * disconnect incoming connect request pending to tep
4007 (void) (STRLOG(TL_ID, tep->te_minor, 2,
4010 tep->te_state = new_state;
4031 tl_freetip(tep, tip);
4032 } else if ((peer_tep = tep->te_oconp) != NULL) { /* client */
4034 * disconnect an outgoing request pending from tep
4038 (void) (STRLOG(TL_ID, tep->te_minor, 2,
4041 tep->te_state = new_state;
4049 di->SEQ_number = tep->te_seqno;
4058 if (IS_SOCKET(tep) && !tl_disable_early_connect) {
4065 tl_icon_queuemsg(peer_tep, tep->te_seqno, dimp);
4070 * it as a hint not to free the tep.
4074 new_state = tep->te_state;
4077 tip = tl_icon_find(peer_tep, tep->te_seqno);
4079 ASSERT(tep == tip->ti_tep);
4091 ASSERT(tep->te_oconp != NULL);
4092 TL_UNCONNECT(tep->te_oconp);
4094 } else if ((peer_tep = tep->te_conp) != NULL) { /* connected! */
4096 (void) (STRLOG(TL_ID, tep->te_minor, 2,
4099 tep->te_state = new_state;
4110 tep->te_state = new_state;
4116 tep->te_state = new_state;
4145 if (tep->te_conp) { /* disconnect pointers if connected */
4161 if (! IS_SOCKET(tep)) {
4165 tep->te_wq->q_next = NULL;
4168 TL_UNCONNECT(tep->te_conp);
4173 tl_addr_req_ser(mblk_t *mp, tl_endpt_t *tep)
4175 if (!tep->te_closing)
4176 tl_addr_req(mp, tep);
4180 tl_serializer_exit(tep);
4181 tl_refrele(tep);
4185 tl_addr_req(mblk_t *mp, tl_endpt_t *tep)
4192 if (tep->te_closing) {
4197 wq = tep->te_wq;
4204 if (IS_CLTS(tep) ||
4205 (tep->te_state > TS_WREQ_ORDREL) ||
4206 (tep->te_state < TS_DATA_XFER)) {
4212 if (tep->te_state >= TS_IDLE)
4214 ack_sz += tep->te_alen;
4217 (void) (STRLOG(TL_ID, tep->te_minor, 1,
4232 if (tep->te_state >= TS_IDLE) {
4234 taa->LOCADDR_length = tep->te_alen;
4237 bcopy(tep->te_abuf, ackmp->b_wptr,
4238 tep->te_alen);
4239 ackmp->b_wptr += tep->te_alen;
4245 ASSERT(tep->te_state == TS_DATA_XFER ||
4246 tep->te_state == TS_WIND_ORDREL ||
4247 tep->te_state == TS_WREQ_ORDREL);
4249 tl_connected_cots_addr_req(mp, tep);
4255 tl_connected_cots_addr_req(mblk_t *mp, tl_endpt_t *tep)
4257 tl_endpt_t *peer_tep = tep->te_conp;
4263 if (tep->te_closing) {
4269 tl_error_ack(tep->te_wq, mp, TSYSERR, ECONNRESET, T_ADDR_REQ);
4273 ASSERT(tep->te_state >= TS_IDLE);
4276 ack_sz += T_ALIGN(tep->te_alen);
4281 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
4283 tl_memrecover(tep->te_wq, mp, ack_sz);
4290 taa->LOCADDR_length = tep->te_alen;
4295 bcopy(tep->te_abuf, addr_startp,
4296 tep->te_alen);
4308 putnext(tep->te_rq, ackmp);
4312 tl_copy_info(struct T_info_ack *ia, tl_endpt_t *tep)
4314 if (IS_CLTS(tep)) {
4319 if (IS_COTSORD(tep))
4323 ia->CURRENT_state = tep->te_state;
4331 tl_capability_req(mblk_t *mp, tl_endpt_t *tep)
4337 if (tep->te_closing) {
4347 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
4349 tl_memrecover(tep->te_wq, mp,
4358 tl_copy_info(&tcap->INFO_ack, tep);
4363 tcap->ACCEPTOR_id = tep->te_acceptor_id;
4367 putnext(tep->te_rq, ackmp);
4371 tl_info_req_ser(mblk_t *mp, tl_endpt_t *tep)
4373 if (! tep->te_closing)
4374 tl_info_req(mp, tep);
4378 tl_serializer_exit(tep);
4379 tl_refrele(tep);
4383 tl_info_req(mblk_t *mp, tl_endpt_t *tep)
4390 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
4392 tl_memrecover(tep->te_wq, mp, sizeof (struct T_info_ack));
4399 tl_copy_info((struct T_info_ack *)ackmp->b_rptr, tep);
4404 putnext(tep->te_rq, ackmp);
4412 tl_data(mblk_t *mp, tl_endpt_t *tep)
4414 queue_t *wq = tep->te_wq;
4419 boolean_t closing = tep->te_closing;
4421 if (IS_CLTS(tep)) {
4422 (void) (STRLOG(TL_ID, tep->te_minor, 2,
4439 (tep->te_state != TS_DATA_XFER) &&
4440 (tep->te_state != TS_WREQ_ORDREL)) {
4448 (void) (STRLOG(TL_ID, tep->te_minor, 1,
4458 (msz < sizeof (struct T_optdata_req) || !IS_SOCKET(tep))) {
4459 (void) (STRLOG(TL_ID, tep->te_minor, 1,
4474 switch (tep->te_state) {
4480 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
4486 if (tep->te_conp != NULL)
4489 if (tep->te_oconp == NULL) {
4507 (void) (STRLOG(TL_ID, tep->te_minor, 1,
4510 TL_PUTBQ(tep, mp);
4524 tl_icon_queuemsg(tep->te_oconp, tep->te_seqno, mp);
4528 if (tep->te_conp == NULL) {
4535 (void) (STRLOG(TL_ID, tep->te_minor, 3,
4538 tl_discon_ind(tep, 0);
4545 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
4551 * tep->te_state = NEXTSTATE(TE_DATA_REQ, tep->te_state);
4558 if (((peer_tep = tep->te_conp) == NULL) || peer_tep->te_closing) {
4561 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
4566 ASSERT(tep->te_ser == peer_tep->te_ser);
4575 TL_PUTBQ(tep, mp);
4588 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
4613 tl_exdata(mblk_t *mp, tl_endpt_t *tep)
4615 queue_t *wq = tep->te_wq;
4620 boolean_t closing = tep->te_closing;
4623 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
4639 (tep->te_state != TS_DATA_XFER) &&
4640 (tep->te_state != TS_WREQ_ORDREL)) {
4648 switch (tep->te_state) {
4654 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
4660 if (tep->te_conp != NULL)
4663 if (tep->te_oconp == NULL) {
4681 (void) (STRLOG(TL_ID, tep->te_minor, 1,
4684 TL_PUTBQ(tep, mp);
4687 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
4690 tl_icon_queuemsg(tep->te_oconp, tep->te_seqno, mp);
4694 if (tep->te_conp == NULL) {
4701 (void) (STRLOG(TL_ID, tep->te_minor, 3,
4704 tl_discon_ind(tep, 0);
4710 (void) (STRLOG(TL_ID, tep->te_minor, 1,
4713 tep->te_state));
4718 * tep->te_state = NEXTSTATE(TE_EXDATA_REQ, tep->te_state);
4725 if (((peer_tep = tep->te_conp) == NULL) || peer_tep->te_closing) {
4728 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
4741 TL_PUTBQ(tep, mp);
4754 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
4777 tl_ordrel(mblk_t *mp, tl_endpt_t *tep)
4779 queue_t *wq = tep->te_wq;
4784 boolean_t closing = tep->te_closing;
4787 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
4800 switch (tep->te_state) {
4804 if (tep->te_conp != NULL)
4807 if (tep->te_oconp == NULL)
4820 (void) (STRLOG(TL_ID, tep->te_minor, 1,
4823 TL_PUTBQ(tep, mp);
4826 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
4829 (void) tl_icon_queuemsg(tep->te_oconp, tep->te_seqno, mp);
4833 (void) (STRLOG(TL_ID, tep->te_minor, 1,
4836 tep->te_state));
4844 tep->te_state = NEXTSTATE(TE_ORDREL_REQ, tep->te_state);
4849 if (((peer_tep = tep->te_conp) == NULL) || peer_tep->te_closing) {
4851 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
4865 TL_PUTBQ(tep, mp);
4878 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
4889 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
4906 tl_endpt_t *tep;
4915 tep = (tl_endpt_t *)wq->q_ptr;
4927 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
4968 tep->te_state = NEXTSTATE(TE_UDERROR_IND, tep->te_state);
4974 tl_unitdata_ser(mblk_t *mp, tl_endpt_t *tep)
4976 queue_t *wq = tep->te_wq;
4978 if (!tep->te_closing && (wq->q_first != NULL)) {
4979 TL_PUTQ(tep, mp);
4980 } else if (tep->te_rq != NULL)
4981 tl_unitdata(mp, tep);
4985 tl_serializer_exit(tep);
4986 tl_refrele(tep);
4995 tl_unitdata(mblk_t *mp, tl_endpt_t *tep)
4997 queue_t *wq = tep->te_wq;
5016 if (tep->te_state != TS_IDLE) {
5017 (void) (STRLOG(TL_ID, tep->te_minor, 1,
5024 * tep->te_state = NEXTSTATE(TE_UNITDATA_REQ, tep->te_state);
5034 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
5046 if (IS_SOCKET(tep)) {
5052 (void) (STRLOG(TL_ID, tep->te_minor,
5064 (void) (STRLOG(TL_ID, tep->te_minor,
5080 (void) (STRLOG(TL_ID, tep->te_minor, 1,
5089 if (alen == 0 || (olen != 0 && !IS_SOCKET(tep))) {
5090 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
5102 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
5111 destaddr.ta_zoneid = tep->te_zoneid;
5118 peer_tep = tep->te_lastep;
5127 peer_tep = (IS_SOCKET(tep) ?
5128 tl_sock_find_peer(tep, &ux_addr) :
5129 tl_find_peer(tep, &destaddr));
5132 (void) (STRLOG(TL_ID, tep->te_minor, 3,
5142 if (tep->te_lastep != NULL)
5143 tl_refrele(tep->te_lastep);
5145 tep->te_lastep = peer_tep;
5149 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
5162 if (!canputnext(peer_tep->te_rq) && !(tep->te_closing)) {
5164 if (tep->te_flowq != NULL) {
5165 list_remove(&tep->te_flowq->te_flowlist, tep);
5167 list_insert_head(&peer_tep->te_flowlist, tep);
5168 tep->te_flowq = peer_tep;
5169 TL_PUTBQ(tep, mp);
5200 ui_sz = T_ALIGN(sizeof (struct T_unitdata_ind) + tep->te_alen) + olen;
5220 if (msz >= ui_sz && alen >= tep->te_alen &&
5227 udind->SRC_length = tep->te_alen;
5229 bcopy(tep->te_abuf, addr_startp, tep->te_alen);
5231 } else if (MBLKSIZE(mp) >= reuse_mb_sz && alen >= tep->te_alen &&
5241 udind->SRC_length = tep->te_alen;
5243 bcopy(tep->te_abuf, addr_startp, tep->te_alen);
5272 (void) (STRLOG(TL_ID, tep->te_minor, 4, SL_TRACE,
5286 udind->SRC_length = tep->te_alen;
5288 bcopy(tep->te_abuf, addr_startp, tep->te_alen);
5334 tl_find_peer(tl_endpt_t *tep, tl_addr_t *ap)
5337 int rc = mod_hash_find_cb(tep->te_addrhash, (mod_hash_key_t)ap,
5340 ASSERT(! IS_SOCKET(tep));
5343 ASSERT(ap->ta_zoneid == tep->te_zoneid);
5347 (tep->te_zoneid == peer_tep->te_zoneid) &&
5348 (tep->te_transport == peer_tep->te_transport));
5364 tl_sock_find_peer(tl_endpt_t *tep, soux_addr_t *ux_addr)
5368 tep->te_aihash : tep->te_addrhash;
5372 ASSERT(IS_SOCKET(tep));
5374 IMPLY(rc == 0, (tep->te_transport == peer_tep->te_transport));
5386 if ((peer_tep->te_zoneid != tep->te_zoneid) &&
5414 tl_get_any_addr(tl_endpt_t *tep, tl_addr_t *req)
5419 ASSERT(tep->te_hash_hndl != NULL);
5420 ASSERT(! IS_SOCKET(tep));
5422 if (tep->te_hash_hndl == NULL)
5433 ASSERT(tep->te_zoneid == req->ta_zoneid);
5436 if (tep->te_alen < alen) {
5440 * Not enough space in tep->te_ap to hold the address,
5446 if (tep->te_alen > 0)
5447 kmem_free(tep->te_abuf, tep->te_alen);
5449 tep->te_alen = alen;
5450 tep->te_abuf = abuf;
5456 bcopy(req->ta_abuf, tep->te_abuf, (size_t)req->ta_alen);
5462 bcopy(&tep->te_minor, tep->te_abuf, sizeof (uint32_t));
5465 if (mod_hash_insert_reserve(tep->te_addrhash,
5466 (mod_hash_key_t)&tep->te_ap, (mod_hash_val_t)tep,
5467 tep->te_hash_hndl) == 0) {
5471 tep->te_flag |= TL_ADDRHASHED;
5472 tep->te_hash_hndl = NULL;
5479 bcopy(&tep->te_defaddr, tep->te_abuf, sizeof (uint32_t));
5480 atomic_inc_32(&tep->te_defaddr);
5506 tl_cl_backenable(tl_endpt_t *tep)
5508 list_t *l = &tep->te_flowlist;
5511 ASSERT(IS_CLTS(tep));
5514 ASSERT(tep->te_ser == elp->te_ser);
5515 ASSERT(elp->te_flowq == tep);
5527 tl_co_unconnect(tl_endpt_t *tep)
5529 tl_endpt_t *peer_tep = tep->te_conp;
5530 tl_endpt_t *srv_tep = tep->te_oconp;
5536 ASSERT(IS_COTS(tep));
5541 TL_UNCONNECT(tep->te_conp);
5545 TL_UNCONNECT(tep->te_oconp);
5549 if (tep->te_nicon > 0) {
5550 l = &tep->te_iconp;
5558 while (tep->te_nicon > 0) {
5563 tl_freetip(tep, tip);
5573 tl_freetip(tep, tip);
5584 (void) (STRLOG(TL_ID, tep->te_minor, 3,
5589 tl_freetip(tep, tip);
5597 if (IS_SOCKET(tep) && !tl_disable_early_connect &&
5599 !tl_icon_hasprim(srv_tep, tep->te_seqno, T_ORDREL_IND)) {
5609 d_mp = tl_discon_ind_alloc(ECONNRESET, tep->te_seqno);
5612 (void) (STRLOG(TL_ID, tep->te_minor, 3,
5615 TL_UNCONNECT(tep->te_oconp);
5626 if (IS_SOCKET(tep) && !tl_disable_early_connect) {
5630 tl_icon_queuemsg(srv_tep, tep->te_seqno, d_mp);
5632 tip = tl_icon_find(srv_tep, tep->te_seqno);
5636 ASSERT(tep == tip->ti_tep);
5637 ASSERT(tep->te_ser == srv_tep->te_ser);
5655 TL_UNCONNECT(tep->te_oconp);
5666 ASSERT(tep->te_ser == peer_tep->te_ser);
5673 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
5679 (void) (STRLOG(TL_ID, tep->te_minor, 3,
5706 (void) (STRLOG(TL_ID, tep->te_minor, 3,
5709 tep->te_state));
5712 (void) (STRLOG(TL_ID, tep->te_minor, 3,
5714 "tl_co_unconnect: state %d", tep->te_state));
5722 if (tep->te_closing) {
5723 peer_tep = tep->te_conp;
5725 TL_REMOVE_PEER(tep->te_conp);
5736 tl_discon_ind(tl_endpt_t *tep, uint32_t reason)
5740 if (tep->te_closing)
5746 flushq(tep->te_rq, FLUSHDATA);
5747 (void) putnextctl1(tep->te_rq, M_FLUSH, FLUSHRW);
5752 d_mp = tl_discon_ind_alloc(reason, tep->te_seqno);
5754 (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
5758 tep->te_state = TS_IDLE;
5759 putnext(tep->te_rq, d_mp);
5809 tl_icon_find(tl_endpt_t *tep, t_scalar_t seqno)
5811 list_t *l = &tep->te_iconp;
5828 tl_icon_queuemsg(tl_endpt_t *tep, t_scalar_t seqno, mblk_t *nmp)
5839 tip = tl_icon_find(tep, seqno);
5880 tl_icon_hasprim(tl_endpt_t *tep, t_scalar_t seqno, t_scalar_t prim)
5882 tl_icon_t *tip = tl_icon_find(tep, seqno);
5900 tl_icon_sendmsgs(tl_endpt_t *tep, mblk_t **mpp)
5905 if (tep->te_closing) {
5910 ASSERT(tep->te_state == TS_DATA_XFER);
5911 ASSERT(tep->te_rq->q_first == NULL);
5923 putnext(tep->te_rq, mp);
5932 putnext(tep->te_rq, mp);
5935 tep->te_state = NEXTSTATE(TE_ORDREL_IND,
5936 tep->te_state);
5937 putnext(tep->te_rq, mp);
5940 tep->te_state = TS_IDLE;
5941 putnext(tep->te_rq, mp);
5982 tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;
5984 if (tep->te_closing) {
5989 (void) (STRLOG(TL_ID, tep->te_minor, 1,
5991 "tl_merror: tep=%p, err=%d", (void *)tep, error));
5998 if (IS_COTS(tep)) {
6000 tl_co_unconnect(tep);
6011 (void) (STRLOG(TL_ID, tep->te_minor, 1,
6024 (void) putnextctl1(tep->te_rq, M_ERROR, error);
6082 tl_endpt_t *tep;
6085 tep = (tl_endpt_t *)wq->q_ptr;
6095 if (! IS_SOCKET(tep))
6101 *valp = (tep->te_flag & TL_SOCKUCRED) != 0;
6139 tl_endpt_t *tep;
6141 tep = (tl_endpt_t *)wq->q_ptr;
6151 if (! IS_SOCKET(tep)) {
6166 if (! IS_CLTS(tep)) {
6171 tep->te_flag &= ~TL_SOCKUCRED;
6173 tep->te_flag |= TL_SOCKUCRED;
6191 (void) (STRLOG(TL_ID, tep->te_minor, 1,
6206 tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;
6208 ASSERT(tep);
6210 tep->te_timoutid = 0;
6224 tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;
6226 ASSERT(tep);
6228 tep->te_bufcid = 0;
6229 tep->te_nowsrv = B_FALSE;
6242 tl_endpt_t *tep;
6244 tep = (tl_endpt_t *)wq->q_ptr;
6246 if (tep->te_closing) {
6254 if (tep->te_bufcid || tep->te_timoutid) {
6255 (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
6260 if (!(tep->te_bufcid = qbufcall(wq, size, BPRI_MED, tl_buffer, wq))) {
6261 tep->te_timoutid = qtimeout(wq, tl_timer, wq,
6267 tl_freetip(tl_endpt_t *tep, tl_icon_t *tip)
6279 list_remove(&tep->te_iconp, tip);
6281 tep->te_nicon--;
6288 tl_addr_unbind(tl_endpt_t *tep)
6292 if (tep->te_flag & TL_ADDRHASHED) {
6293 if (IS_SOCKET(tep)) {
6294 (void) mod_hash_remove(tep->te_addrhash,
6295 (mod_hash_key_t)tep->te_vp,
6297 tep->te_vp = (void *)(uintptr_t)tep->te_minor;
6298 tep->te_magic = SOU_MAGIC_IMPLICIT;
6300 (void) mod_hash_remove(tep->te_addrhash,
6301 (mod_hash_key_t)&tep->te_ap,
6303 (void) kmem_free(tep->te_abuf, tep->te_alen);
6304 tep->te_alen = -1;
6305 tep->te_abuf = NULL;
6307 tep->te_flag &= ~TL_ADDRHASHED;