Lines matching sock_id (the number at the start of each entry is the line number in the source file)

312 static void tcp_rcv_drain(int sock_id, tcp_t *);
325 uint32_t, int, tcph_t *, int sock_id);
468 tcp_drain_input(tcp_t *tcp, int sock_id, int timeout)
476 dprintf("tcp_drain_input(%d): %s\n", sock_id,
484 old_timeout = sockets[sock_id].in_timeout;
485 sockets[sock_id].in_timeout = timeout;
491 old_in_gram = sockets[sock_id].inq;
492 sockets[sock_id].inq = NULL;
496 if (sockets[sock_id].input[i] != NULL) {
497 if (sockets[sock_id].input[i](sock_id) < 0) {
498 sockets[sock_id].in_timeout = old_timeout;
499 if (sockets[sock_id].inq != NULL)
500 nuke_grams(&sockets[sock_id].inq);
501 sockets[sock_id].inq = old_in_gram;
509 while ((in_gram = sockets[sock_id].inq) != NULL) {
516 del_gram(&sockets[sock_id].inq, in_gram, B_TRUE);
520 del_gram(&sockets[sock_id].inq, in_gram, B_FALSE);
522 tcp_rput_data(tcp, mp, sock_id);
523 sockets[sock_id].in_timeout = old_timeout;
531 if (sockets[sock_id].pcb == NULL)
535 if (tcp == NULL || sockets[sock_id].pcb == NULL) {
536 if (sockets[sock_id].so_error != 0)
544 sockets[sock_id].in_timeout = old_timeout;
545 sockets[sock_id].inq = old_in_gram;
550 tcp_drain_needed(sock_id, tcp);
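
The tcp_drain_input() hits above (source lines 468-550) follow a save/process/restore pattern: the socket's receive timeout is saved and replaced with the caller's budget, each gram queued on sockets[sock_id].inq is fed through tcp_rput_data(), and the saved timeout (and, on the failure path, the original queue) is restored before returning. A minimal sketch of that shape; the sock_sketch/gram types and the simplified tcp_rput_data()/del_gram() signatures are stand-ins, since the real definitions live in the boot networking headers:

    /* Sketch only: stand-in types for the real boot socket and gram structures. */
    typedef struct tcp_s tcp_t;
    struct gram { struct gram *next; void *igm_mp; };
    struct sock_sketch {
        tcp_t        *pcb;          /* NULL once the connection is torn down */
        unsigned int  in_timeout;   /* receive timeout (units assumed) */
        struct gram  *inq;          /* queue of received grams */
    };
    extern struct sock_sketch sockets[];
    extern void tcp_rput_data(tcp_t *, void *mp, int sock_id);
    extern void del_gram(struct gram **, struct gram *, int freemsg);

    /* Assumed drain loop: temporarily narrow the timeout, push every queued
     * gram through the TCP input path, then restore the caller's timeout. */
    int
    drain_input_sketch(tcp_t *tcp, int sock_id, unsigned int timeout)
    {
        unsigned int old_timeout = sockets[sock_id].in_timeout;
        struct gram *in_gram;

        sockets[sock_id].in_timeout = timeout;
        while ((in_gram = sockets[sock_id].inq) != NULL) {
            void *mp = in_gram->igm_mp;
            del_gram(&sockets[sock_id].inq, in_gram, 0);  /* unlink, keep the mblk */
            tcp_rput_data(tcp, mp, sock_id);
            if (sockets[sock_id].pcb == NULL)             /* connection died mid-drain */
                break;
        }
        sockets[sock_id].in_timeout = old_timeout;
        return (sockets[sock_id].pcb == NULL ? -1 : 0);
    }
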
561 tcp_input(int sock_id)
569 if ((tcp = sockets[sock_id].pcb) == NULL)
572 while ((in_gram = sockets[sock_id].inq) != NULL) {
579 del_gram(&sockets[sock_id].inq, in_gram, B_TRUE);
583 del_gram(&sockets[sock_id].inq, in_gram, B_FALSE);
585 tcp_rput_data(tcp, mp, sock_id);
587 if (sockets[sock_id].pcb == NULL)
593 tcp_rcv_drain(sock_id, tcp);
597 sockets[sock_id].so_state |= SS_CANTRCVMORE;
610 tcp_send(int sock_id, tcp_t *tcp, const void *msg, int len)
677 if (tcp_drain_input(tcp, sock_id, 5) < 0)
680 tcp_wput_data(tcp, head, sock_id);
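
The tcp_send() hits (source lines 610-680) suggest the send path first drains pending input with a 5-second budget, presumably so ACKs and window updates get processed, and only then hands the assembled chain to tcp_wput_data(). A hedged caller-side sketch; socket_pcb() is a hypothetical accessor standing in for the sockets[sock_id].pcb lookup:

    typedef struct tcp_s tcp_t;                 /* opaque here */
    extern tcp_t *socket_pcb(int sock_id);      /* hypothetical accessor for sockets[sock_id].pcb */
    extern int tcp_send(int sock_id, tcp_t *tcp, const void *msg, int len);

    /* Forward a socket-level write into TCP.  tcp_send() itself drains any
     * queued input and then calls tcp_wput_data() to segment and transmit. */
    int
    write_sketch(int sock_id, const void *buf, int len)
    {
        tcp_t *tcp = socket_pcb(sock_id);

        if (tcp == NULL)
            return (-1);
        return (tcp_send(sock_id, tcp, buf, len));
    }
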
796 tcp_eager_cleanup(tcp_t *listener, boolean_t q0_only, int sock_id)
806 sock_id);
814 eager, NULL, eager->tcp_snxt, 0, TH_RST, 0, sock_id);
823 tcp_shutdown(int sock_id)
827 DEBUG_1("tcp_shutdown: sock_id %x\n", sock_id);
829 if ((tcp = sockets[sock_id].pcb) == NULL) {
838 if (tcp_drain_input(tcp, sock_id, 5) < 0) {
843 if (sockets[sock_id].pcb != NULL)
844 tcp_clean_death(sock_id, tcp, 0);
862 if (tcp_xmit_end(tcp, sock_id) == 0 &&
863 tcp_state_wait(sock_id, tcp, TCPS_FIN_WAIT_2) < 0) {
865 if (sockets[sock_id].pcb == NULL)
880 tcp_close(int sock_id)
886 if ((tcp = sockets[sock_id].pcb) == NULL) {
897 if (tcp_drain_input(tcp, sock_id, 5) < 0) {
902 if (sockets[sock_id].pcb != NULL)
903 tcp_clean_death(sock_id, tcp, 0);
909 tcp_eager_cleanup(tcp, 0, sock_id);
958 (void) tcp_xmit_end(tcp, sock_id);
959 if (sockets[sock_id].pcb == NULL)
978 if (tcp_drain_input(tcp, sock_id, 5) < 0) {
979 if (sockets[sock_id].pcb != NULL) {
980 tcp_clean_death(sock_id,
988 if (tcp_state_wait(sock_id, tcp, TCPS_TIME_WAIT) < 0) {
990 if (sockets[sock_id].pcb == NULL)
1008 sock_id);
1013 sockets[sock_id].pcb = NULL;
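
Taken together, the tcp_shutdown() and tcp_close() hits (source lines 823-1013) outline the teardown order: look up the PCB, drain pending input (falling back to tcp_clean_death() if the drain fails), reap embryonic connections on a listener with tcp_eager_cleanup(), send the FIN via tcp_xmit_end(), wait in tcp_state_wait() for FIN_WAIT_2 (shutdown) or TIME_WAIT (close), and finally detach the PCB from the socket. A hedged sketch of the close-side ordering; socket_pcb(), socket_clear_pcb() and TCPS_TIME_WAIT_SKETCH are invented stand-ins, and the simplified prototypes only mirror the listing:

    typedef struct tcp_s tcp_t;
    extern tcp_t *socket_pcb(int sock_id);            /* hypothetical accessor */
    extern void   socket_clear_pcb(int sock_id);      /* hypothetical: sockets[sock_id].pcb = NULL */
    extern int    tcp_drain_input(tcp_t *, int sock_id, int timeout);
    extern void   tcp_eager_cleanup(tcp_t *listener, int q0_only, int sock_id);
    extern int    tcp_xmit_end(tcp_t *, int sock_id);
    extern int    tcp_state_wait(int sock_id, tcp_t *, int state);
    extern void   tcp_clean_death(int sock_id, tcp_t *, int err);
    #define TCPS_TIME_WAIT_SKETCH 11                  /* placeholder; real TCPS_TIME_WAIT comes from the TCP headers */

    /* Assumed close ordering, mirroring the fragments above. */
    int
    close_sketch(int sock_id)
    {
        tcp_t *tcp = socket_pcb(sock_id);

        if (tcp == NULL)
            return (-1);
        if (tcp_drain_input(tcp, sock_id, 5) < 0) {   /* flush queued segments first */
            if (socket_pcb(sock_id) != NULL)
                tcp_clean_death(sock_id, tcp, 0);
            return (-1);
        }
        tcp_eager_cleanup(tcp, 0, sock_id);           /* drop half-open eagers (listener case) */
        (void) tcp_xmit_end(tcp, sock_id);            /* send our FIN */
        if (socket_pcb(sock_id) != NULL &&
            tcp_state_wait(sock_id, tcp, TCPS_TIME_WAIT_SKETCH) < 0)
            return (-1);
        socket_clear_pcb(sock_id);                    /* detach the PCB from the socket */
        return (0);
    }
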
1019 tcp_listen(int sock_id, int backlog)
1023 if ((tcp = (tcp_t *)(sockets[sock_id].pcb)) == NULL) {
1050 tcp_accept(int sock_id, struct sockaddr *addr, socklen_t *addr_len)
1059 if ((listener = (tcp_t *)(sockets[sock_id].pcb)) == NULL ||
1067 if (sockets[sock_id].in_timeout > tcp_accept_timeout)
1068 timeout = prom_gettime() + sockets[sock_id].in_timeout;
1076 if (tcp_drain_input(listener, sock_id, 5) < 0) {
1117 printf("tcp_accept(), new sock_id: %d\n", sd);
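
The tcp_listen()/tcp_accept() hits (source lines 1019-1117) show the passive side: accept derives a deadline from prom_gettime() plus the socket's receive timeout (capped by tcp_accept_timeout), drains the listener's input in 5-second slices, and the debug print at line 1117 indicates it hands back a newly allocated sock_id for the established connection. A caller-side sketch using only the prototypes visible above; the backlog value and error handling are illustrative:

    #include <sys/socket.h>
    #include <netinet/in.h>

    extern int tcp_listen(int sock_id, int backlog);
    extern int tcp_accept(int sock_id, struct sockaddr *addr, socklen_t *addr_len);

    /* Minimal passive open: mark the socket as a listener, then block in
     * tcp_accept() until a connection completes or the accept timeout expires.
     * The fragments suggest the return value is the new connection's sock_id. */
    int
    accept_one_sketch(int listen_id)
    {
        struct sockaddr_in peer;
        socklen_t          peer_len = sizeof (peer);

        if (tcp_listen(listen_id, 5) < 0)       /* backlog of 5 is illustrative */
            return (-1);
        return (tcp_accept(listen_id, (struct sockaddr *)&peer, &peer_len));
    }
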
1179 tcp_bind(int sock_id)
1186 if ((tcp = (tcp_t *)sockets[sock_id].pcb) == NULL) {
1197 requested_port = ntohs(sockets[sock_id].bind.sin_port);
1200 tcp->tcp_bound_source = sockets[sock_id].bind.sin_addr.s_addr;
1213 reuseaddr = sockets[sock_id].so_opt & SO_REUSEADDR;
1223 sockets[sock_id].bind.sin_port = tcp->tcp_lport;
1260 tcp_connect(int sock_id)
1269 if ((tcp = (tcp_t *)(sockets[sock_id].pcb)) == NULL) {
1276 dstaddr = sockets[sock_id].remote.sin_addr.s_addr;
1277 dstport = sockets[sock_id].remote.sin_port;
1308 ipv4_getipaddr(&(sockets[sock_id].bind.sin_addr));
1310 if (ntohl(sockets[sock_id].bind.sin_addr.s_addr) ==
1315 tcp->tcp_bound_source = sockets[sock_id].bind.sin_addr.s_addr;
1409 ret = ipv4_tcp_output(sock_id, syn_mp);
1421 return (tcp_state_wait(sock_id, tcp, TCPS_ESTABLISHED));
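
The tcp_bind()/tcp_connect() hits (source lines 1179-1421) sketch the active side: bind reads the requested address and port from sockets[sock_id].bind (honouring SO_REUSEADDR and writing the chosen port back at line 1223), while connect takes the peer from sockets[sock_id].remote, obtains a local address via ipv4_getipaddr() when none is set, transmits the SYN with ipv4_tcp_output(), and blocks in tcp_state_wait() until TCPS_ESTABLISHED. A hedged caller-side sketch; socket_set_bind() and socket_set_remote() are hypothetical helpers standing in for the socket layer filling sockets[sock_id].bind and .remote:

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>

    extern int  tcp_bind(int sock_id);
    extern int  tcp_connect(int sock_id);
    extern void socket_set_bind(int sock_id, const struct sockaddr_in *);   /* hypothetical */
    extern void socket_set_remote(int sock_id, const struct sockaddr_in *); /* hypothetical */

    /* Active open: record local/remote addresses, bind a local port (port 0
     * appears to let tcp_bind() choose one and write it back), then let
     * tcp_connect() send the SYN and wait for TCPS_ESTABLISHED. */
    int
    connect_sketch(int sock_id, struct in_addr dst, in_port_t dport)
    {
        struct sockaddr_in local = { 0 }, remote = { 0 };

        local.sin_family = AF_INET;
        remote.sin_family = AF_INET;
        remote.sin_addr = dst;
        remote.sin_port = htons(dport);

        socket_set_bind(sock_id, &local);
        socket_set_remote(sock_id, &remote);
        if (tcp_bind(sock_id) < 0)
            return (-1);
        return (tcp_connect(sock_id));
    }
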
1603 tcp_conn_request(tcp_t *tcp, mblk_t *mp, uint_t sock_id, uint_t ip_hdr_len)
1700 tcp_state_wait(int sock_id, tcp_t *tcp, int state)
1714 timeout = sockets[sock_id].in_timeout;
1716 sockets[sock_id].in_timeout = tcp->tcp_rto;
1720 if (sockets[sock_id].inq == NULL) {
1723 if (sockets[sock_id].input[i] != NULL) {
1724 if (sockets[sock_id].input[i](sock_id) < 0) {
1726 sockets[sock_id].in_timeout =
1735 while ((in_gram = sockets[sock_id].inq) != NULL) {
1746 del_gram(&sockets[sock_id].inq, in_gram, B_TRUE);
1750 del_gram(&sockets[sock_id].inq, in_gram, B_FALSE);
1752 tcp_rput_data(tcp, mp, sock_id);
1760 if (sockets[sock_id].pcb == NULL) {
1766 if (tcp == NULL || sockets[sock_id].pcb == NULL) {
1769 "error %d\n", state, sockets[sock_id].so_error);
1771 if (sockets[sock_id].so_error != 0)
1786 tcp_timer(tcp, sock_id);
1791 sockets[sock_id].in_timeout = timeout;
1793 tcp_drain_needed(sock_id, tcp);
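
tcp_state_wait() (source lines 1700-1793) is the polling heart of the stack: it narrows sockets[sock_id].in_timeout to the RTO, calls each registered sockets[sock_id].input[i] handler to pull packets in, pushes queued grams through tcp_rput_data(), runs tcp_timer() when nothing arrives, and returns once the connection reaches the requested state or the PCB disappears. A simplified model of that loop; all *_sk types and functions are stand-ins, the input[] array size is illustrative, and the ordered state comparison is an assumption:

    typedef struct tcp_sk {
        int          tcp_state;
        unsigned int tcp_rto;              /* retransmission timeout (units assumed) */
    } tcp_sk_t;
    struct gram_sk { struct gram_sk *next; void *mp; };
    struct sock_sk {
        tcp_sk_t       *pcb;
        unsigned int    in_timeout;
        struct gram_sk *inq;
        int           (*input[4])(int sock_id);   /* registered pollers; size illustrative */
    };
    extern struct sock_sk sockets[];
    extern void tcp_rput_data_sk(tcp_sk_t *, void *mp, int sock_id);
    extern void del_gram_sk(struct gram_sk **, struct gram_sk *, int freeit);
    extern void tcp_timer_sk(tcp_sk_t *, int sock_id);

    /* Poll for input until the connection reaches the desired state. */
    int
    state_wait_sketch(int sock_id, tcp_sk_t *tcp, int state)
    {
        unsigned int saved_timeout = sockets[sock_id].in_timeout;
        struct gram_sk *in_gram;
        int i;

        sockets[sock_id].in_timeout = tcp->tcp_rto;   /* wake often enough to run the timer */
        while (tcp->tcp_state < state) {              /* assumes FSM constants are ordered */
            if (sockets[sock_id].inq == NULL) {
                for (i = 0; i < 4; i++) {             /* ask each driver for packets */
                    if (sockets[sock_id].input[i] != NULL &&
                        sockets[sock_id].input[i](sock_id) < 0) {
                        sockets[sock_id].in_timeout = saved_timeout;
                        return (-1);
                    }
                }
            }
            while ((in_gram = sockets[sock_id].inq) != NULL) {
                void *mp = in_gram->mp;
                del_gram_sk(&sockets[sock_id].inq, in_gram, 0);
                tcp_rput_data_sk(tcp, mp, sock_id);
            }
            if (sockets[sock_id].pcb == NULL) {       /* reset or error underneath us */
                sockets[sock_id].in_timeout = saved_timeout;
                return (-1);
            }
            if (sockets[sock_id].inq == NULL)
                tcp_timer_sk(tcp, sock_id);           /* nothing arrived within the RTO */
        }
        sockets[sock_id].in_timeout = saved_timeout;
        return (0);
    }
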
1829 tcp_lookup_ipv4(struct ip *iph, tcpha_t *tcph, int min_state, int *sock_id)
1842 *sock_id = i;
1855 *sock_id = -1;
1864 tcp_lookup_listener_ipv4(in_addr_t addr, in_port_t port, int *sock_id)
1875 *sock_id = i;
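
tcp_lookup_ipv4() and tcp_lookup_listener_ipv4() (source lines 1829-1875) return the matching tcp_t and report the owning socket through *sock_id, setting it to -1 when nothing matches, which implies a simple linear scan of the sockets[] table; that is a reasonable design for a boot-time stack that only ever has a handful of sockets. A self-contained model of the listener-lookup case; the struct sk fields and the wildcard-address test are invented for the sketch:

    #include <stdio.h>
    #include <stdint.h>

    #define NSOCKETS 4

    /* Invented, minimal model of the per-socket state used by the lookup. */
    struct sk {
        int       in_use;
        uint32_t  laddr;      /* bound local address; 0 treated as wildcard (assumption) */
        uint16_t  lport;      /* bound local port */
        int       listening;
    };
    static struct sk sockets[NSOCKETS];

    /* Linear scan, as the fragments suggest: report the index through *sock_id,
     * or set it to -1 when no socket matches. */
    static struct sk *
    lookup_listener_sketch(uint32_t addr, uint16_t port, int *sock_id)
    {
        int i;

        for (i = 0; i < NSOCKETS; i++) {
            if (sockets[i].in_use && sockets[i].listening &&
                sockets[i].lport == port &&
                (sockets[i].laddr == addr || sockets[i].laddr == 0)) {
                *sock_id = i;
                return (&sockets[i]);
            }
        }
        *sock_id = -1;
        return (NULL);
    }

    int
    main(void)
    {
        int id;

        sockets[2] = (struct sk){ .in_use = 1, .laddr = 0, .lport = 80, .listening = 1 };
        (void) lookup_listener_sketch(0x0a000001, 80, &id);
        printf("listener on port 80 -> sock_id %d\n", id);   /* prints 2 */
        return (0);
    }
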
1920 tcp_clean_death(int sock_id, tcp_t *tcp, int err)
1926 if (sock_id >= 0) {
1927 sockets[sock_id].pcb = NULL;
1929 sockets[sock_id].so_error = err;
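
tcp_clean_death() (source lines 1920-1929) detaches the PCB and latches the error into so_error, but only when it was given a valid descriptor; callers that might race with teardown pass sock_id = -1 (see the comment at source line 5217 further down). A minimal sketch of that guard; tcp_free_sketch() is a hypothetical stand-in for whatever releases the tcp_t itself:

    typedef struct tcp_s tcp_t;
    struct sock_cd { tcp_t *pcb; int so_error; };
    extern struct sock_cd sockets[];
    extern void tcp_free_sketch(tcp_t *);     /* hypothetical: release the PCB itself */

    /* Detach the PCB and latch the error, but only for a real descriptor:
     * sock_id == -1 means the socket no longer owns this tcp_t. */
    void
    clean_death_sketch(int sock_id, tcp_t *tcp, int err)
    {
        if (sock_id >= 0) {
            sockets[sock_id].pcb = NULL;
            sockets[sock_id].so_error = err;
        }
        tcp_free_sketch(tcp);
    }
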
2491 tcp_sack_rxmit(tcp_t *tcp, int sock_id)
2585 (void) ipv4_tcp_output(sock_id, xmit_mp);
2612 tcp_rput_data(tcp_t *tcp, mblk_t *mp, int sock_id)
2637 sock_id, mp, mp->b_datap);
2699 &sock_id)) != NULL ||
2701 tcph->tha_fport, &sock_id)) != NULL) {
2734 tcp_xmit_listeners_reset(sock_id, mp, ip_hdr_len);
2743 seg_len, (tcph_t *)tcph, sock_id);
2763 "pointer\n", sock_id);
2777 sock_id, mp, seg_ack, 0, TH_RST,
2789 tcp = tcp_conn_request(tcp, mp, sock_id, ip_hdr_len);
2820 ip_hdr_len, sock_id);
2828 tcp_clean_death(sock_id, tcp, ECONNREFUSED);
2928 (void) ipv4_tcp_output(sock_id, mp1);
2935 if (tcp_state_wait(sock_id, tcp, TCPS_ALL_ACKED) < 0) {
3123 tcp_rcv_drain(sock_id, tcp);
3261 (void) tcp_clean_death(sock_id, tcp, ECONNREFUSED);
3267 (void) tcp_clean_death(sock_id, tcp, ECONNRESET);
3271 (void) tcp_clean_death(sock_id, tcp, 0);
3275 (void) tcp_clean_death(sock_id, tcp, ENXIO);
3300 seg_seq + 1, TH_RST|TH_ACK, 0, sock_id);
3302 (void) tcp_clean_death(sock_id, tcp, ECONNRESET);
3329 tcp, NULL, seg_ack, 0, TH_RST, 0, sock_id);
3692 (void) ipv4_tcp_output(sock_id, mp);
3938 tcp->tcp_rnxt, TH_RST|TH_ACK, 0, sock_id);
4018 (void) tcp_clean_death(sock_id, tcp, 0);
4092 if (++tcp->tcp_rack_cnt == 2 || sockets[sock_id].inq == NULL) {
4150 (void) ipv4_tcp_output(sock_id, mp1);
4155 if (tcp_sack_rxmit(tcp, sock_id) != 0) {
4166 tcp_wput_data(tcp, NULL, sock_id);
4168 tcp_ss_rexmit(tcp, sock_id);
4175 if (sockets[sock_id].pcb == NULL)
4197 (void) ipv4_tcp_output(sock_id, mp1);
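
tcp_rput_data() (source lines 2612 onward) is the segment input path. The hits around lines 2699-2743 show the demultiplexing order: match a full connection first with tcp_lookup_ipv4(), fall back to a listener with tcp_lookup_listener_ipv4(), and otherwise hand the segment to tcp_xmit_listeners_reset() so a RST goes back; line 4092 additionally suggests an ACK is forced out once two segments have accumulated or the input queue is empty. A hedged sketch of just the demux step; the opaque header types and the simplified prototypes are shaped after the listing, not copied from it:

    #include <netinet/in.h>

    struct ip;                             /* IPv4 header, opaque here */
    typedef struct tcpha_s tcpha_t;        /* aligned TCP header, opaque here */
    typedef struct tcp_s tcp_t;

    extern tcp_t *tcp_lookup_ipv4(struct ip *, tcpha_t *, int min_state, int *sock_id);
    extern tcp_t *tcp_lookup_listener_ipv4(in_addr_t addr, in_port_t port, int *sock_id);
    extern void   tcp_xmit_listeners_reset(int sock_id, void *mp, unsigned int ip_hdr_len);

    /* Demultiplex an inbound segment to a connection, then to a listener,
     * and finally to the reset path when nobody claims it. */
    tcp_t *
    demux_sketch(struct ip *iph, tcpha_t *tcph, in_addr_t dst, in_port_t dport,
        int min_state, void *mp, unsigned int ip_hdr_len, int *sock_id)
    {
        tcp_t *tcp;

        if ((tcp = tcp_lookup_ipv4(iph, tcph, min_state, sock_id)) != NULL)
            return (tcp);                  /* existing connection */
        if ((tcp = tcp_lookup_listener_ipv4(dst, dport, sock_id)) != NULL)
            return (tcp);                  /* SYN aimed at a listener */
        tcp_xmit_listeners_reset(*sock_id, mp, ip_hdr_len);  /* nobody claims it: RST */
        return (NULL);
    }
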
4212 tcp_ss_rexmit(tcp_t *tcp, int sock_id)
4255 (void) ipv4_tcp_output(sock_id, xmit_mp);
4286 tcp_wput_data(tcp, NULL, sock_id);
4296 tcp_timer(tcp_t *tcp, int sock_id)
4350 DEBUG_1("tcp_timer (%d): zero win", sock_id);
4437 tcp_wput_data(tcp, NULL, sock_id);
4455 (void) tcp_clean_death(sock_id, tcp, 0);
4458 DEBUG_3("tcp_timer (%d): strange state (%d) %s", sock_id,
4482 tcp->tcp_rnxt, TH_RST | TH_ACK, 0, sock_id);
4484 (void) tcp_clean_death(sock_id, tcp,
4574 (void) ipv4_tcp_output(sock_id, mp);
4609 tcp_wput_data(tcp_t *tcp, mblk_t *mp, int sock_id)
4628 printf("tcp_wput_data(%d) ##############################\n", sock_id);
4906 (void) ipv4_tcp_output(sock_id, mp);
5133 (void) ipv4_tcp_output(sock_id, mp);
5185 (void) tcp_state_wait(sock_id, tcp, TCPS_ALL_ACKED);
5204 int sock_id)
5217 /* Just make sure we send the right sock_id to tcp_clean_death */
5218 if ((sockets[sock_id].pcb == NULL) || (sockets[sock_id].pcb != tcp))
5219 sock_id = -1;
5304 tcp_clean_death(sock_id, tcp, 0);
5355 (void) tcp_clean_death(sock_id, tcp, 0);
5516 uint32_t ack, int ctl, uint_t ip_hdr_len, int sock_id)
5544 "ctl 0x%x\n", sock_id, str, seq, ack, ctl);
5548 dprintf("tcp_xmit_ctl(%d): Cannot allocate memory\n", sock_id);
5595 (void) ipv4_tcp_output(sock_id, mp);
6072 tcp_xmit_listeners_reset(int sock_id, mblk_t *mp, uint_t ip_hdr_len)
6093 sock_id, mp, seg_ack, 0, TH_RST, ip_hdr_len);
6097 tcp_xmit_early_reset("no tcp, reset/ack", sock_id,
6121 tcp_xmit_early_reset(char *str, int sock_id, mblk_t *mp, uint32_t seq,
6206 (void) ipv4_tcp_output(sock_id, mp);
6739 tcp_rcv_drain(int sock_id, tcp_t *tcp)
6747 if (sockets[sock_id].so_rcvbuf <= 0)
6776 add_grams(&sockets[sock_id].inq, in_gram);
6779 sockets[sock_id].so_rcvbuf -= in_mp->b_wptr - in_mp->b_rptr;
6786 if (sockets[sock_id].so_rcvbuf > 0 &&
6798 tcp_rcv_drain_sock(int sock_id)
6801 if ((tcp = sockets[sock_id].pcb) == NULL)
6803 tcp_rcv_drain(sock_id, tcp);
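
tcp_rcv_drain() (source lines 6739-6786) moves reassembled data from the TCP receive list into the socket's input queue, charging each mblk's length (b_wptr - b_rptr) against sockets[sock_id].so_rcvbuf and stopping when that credit runs out; tcp_rcv_drain_sock() is the thin wrapper that resolves sock_id to its PCB first. A simplified model of the buffer-accounting loop; the mblk/gram stand-ins and wrap_mblk() are invented for the sketch:

    /* Invented stand-ins for the real mblk/gram types. */
    struct mblk_sk { struct mblk_sk *b_cont; unsigned char *b_rptr, *b_wptr; };
    struct gram_sk2 { struct gram_sk2 *next; struct mblk_sk *mp; };
    struct sock_rcv {
        int so_rcvbuf;                     /* remaining receive-buffer credit */
        struct gram_sk2 *inq;
    };
    extern struct sock_rcv sockets[];
    extern void add_grams(struct gram_sk2 **, struct gram_sk2 *);
    extern struct gram_sk2 *wrap_mblk(struct mblk_sk *);   /* hypothetical: build a gram around an mblk */

    /* Move mblks from the TCP receive list to the socket queue until the
     * receive buffer credit runs out; the remainder stays on the list. */
    void
    rcv_drain_sketch(int sock_id, struct mblk_sk **rcv_list)
    {
        struct mblk_sk *in_mp;

        while (sockets[sock_id].so_rcvbuf > 0 && (in_mp = *rcv_list) != NULL) {
            *rcv_list = in_mp->b_cont;
            in_mp->b_cont = NULL;
            sockets[sock_id].so_rcvbuf -= (int)(in_mp->b_wptr - in_mp->b_rptr);
            add_grams(&sockets[sock_id].inq, wrap_mblk(in_mp));
        }
    }
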
6812 tcp_drain_needed(int sock_id, tcp_t *tcp)
6817 sockets[sock_id].inq, tcp->tcp_rcv_list);
6819 if ((sockets[sock_id].inq != NULL) ||
6831 add_grams(&sockets[sock_id].inq, in_gram);
6967 tcp_xmit_end(tcp_t *tcp, int sock_id)
6993 (void) ipv4_tcp_output(sock_id, mp);