/* ip_ire.h, revision 516bda921188ac42ecd8efd54cda7645912f2e09 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/* Copyright (c) 1990 Mentat Inc. */
#ifndef _INET_IP_IRE_H
#define _INET_IP_IRE_H
#pragma ident "%Z%%M% %I% %E% SMI"
#ifdef __cplusplus
extern "C" {
#endif
#define IP_CACHE_TABLE_SIZE 256
/* size. Only used by mipagent */
#define IP6_CACHE_TABLE_SIZE 256
/*
* We use the common modulo hash function. In ip_ire_init(), we make
* sure that the cache table size is always a power of 2. That's why
* we can use & instead of %. Also note that we try hard to make sure
* the lower bits of an address capture most info from the whole address.
 * The reason is that our hash table is probably a lot smaller than
 * 2^32 buckets, so the lower bits are the most important.
*/
#define	IRE_ADDR_HASH(addr, table_size)					\
	(((addr) ^ ((addr) >> 8) ^ ((addr) >> 16) ^ ((addr) >> 24)) &	\
	((table_size) - 1))
/*
* Exclusive-or those bytes that are likely to contain the MAC
* address. Assumes EUI-64 format for good hashing.
*/
#define	IRE_ADDR_HASH_V6(addr, table_size)				\
	(((addr).s6_addr8[8] ^ (addr).s6_addr8[9] ^			\
	(addr).s6_addr8[10] ^ (addr).s6_addr8[13] ^			\
	(addr).s6_addr8[14] ^ (addr).s6_addr8[15]) &			\
	((table_size) - 1))
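/*
 * Illustrative sketch (not part of the original header): because
 * ip_ire_init() keeps the cache table sizes a power of 2, the masking
 * hash above maps an address straight to a bucket index, e.g. roughly
 *
 *	irb_t *irb = &ip_cache_table[IRE_ADDR_HASH(dst, ip_cache_table_size)];
 *	irb_t *irb6 = &ip_cache_table_v6[IRE_ADDR_HASH_V6(dst6,
 *	    ip6_cache_table_size)];
 *
 * where dst and dst6 are hypothetical destination addresses.
 */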
/* This assumes that the ftable size is a power of 2. */
/*
* match parameter definitions for
* IRE lookup routines.
*/
/* found. */
/* with RTF_REJECT or RTF_BLACKHOLE, */
/* return the ire. No recursive */
/* lookup should be done. */
/* IRE_MARK_HIDDEN. */
/*
* MATCH_IRE_ILL is used whenever we want to specifically match an IRE
* whose ire_ipif->ipif_ill or (ill_t *)ire_stq->q_ptr matches a given
* ill. When MATCH_IRE_ILL is used to locate an IRE_CACHE, it implies
* that the packet will not be load balanced. This is normally used
* by in.mpathd to send out failure detection probes.
*
 * MATCH_IRE_ILL_GROUP is used whenever we are not specific about which
 * interface (ill) the packet should be sent out on. This implies that
 * the packet will be subjected to load balancing and may go out on any
 * interface in the group. When there is only one interface in the group,
 * MATCH_IRE_ILL_GROUP is equivalent to MATCH_IRE_ILL. Most of the code
 * uses MATCH_IRE_ILL_GROUP; MATCH_IRE_ILL is used in the few cases where
 * we want to disable load balancing.
*
* MATCH_IRE_PARENT is used whenever we unconditionally want to get the
* parent IRE (sire) while recursively searching IREs for an offsubnet
* destination. With this flag, even if no IRE_CACHETABLE or IRE_INTERFACE
 * is found to help resolve the IRE_OFFSUBNET in the lookup routines, the
* IRE_OFFSUBNET sire, if any, is returned to the caller.
*/
/* even if ire is not matched. */
/* don't match IRE_LOCALs from other */
/* zones or shared IREs */
/* IRE_MARK_PRIVATE_ADDR. */
/* IRE_CACHE entry only if it is */
/* ND_REACHABLE */
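/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * in.mpathd-style probe would pin the lookup to one ill, e.g. roughly
 *
 *	match_flags = MATCH_IRE_TYPE | MATCH_IRE_ILL;
 *
 * whereas normal traffic would pass MATCH_IRE_ILL_GROUP instead and
 * accept an IRE_CACHE tied to any ill in the same group, allowing the
 * load balancing described above.
 */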
/*
 * Any ire to nce association is long term, and the refhold and refrele
 * may be done by different threads. So all cases of making or breaking
 * an ire to nce association should effectively use the NOTR variants.
 * To understand the *effectively* part, read on.
 *
 * ndp_lookup() and ndp_add() implicitly do an NCE_REFHOLD. So wherever
 * we make an ire to nce association after calling these functions, we
 * effectively want to end up with an NCE_REFHOLD_NOTR. We call this
 * macro to achieve that effect: it converts an NCE_REFHOLD into an
 * NCE_REFHOLD_NOTR. The macro's NCE_REFRELE cancels out ndp_lookup's
 * (or ndp_add's) implicit NCE_REFHOLD, and what you are left with is
 * an NCE_REFHOLD_NOTR.
*/
#define NCE_REFHOLD_TO_REFHOLD_NOTR(nce) { \
NCE_REFHOLD_NOTR(nce); \
NCE_REFRELE(nce); \
}
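/*
 * Illustrative usage sketch (not part of the original header), assuming
 * an nce obtained from ndp_lookup() with its implicit NCE_REFHOLD and a
 * hypothetical ire_nce binding:
 *
 *	nce = ndp_lookup(...);
 *	ire->ire_nce = nce;
 *	NCE_REFHOLD_TO_REFHOLD_NOTR(nce);
 *
 * so that the long-term ire to nce reference is held as a NOTR hold.
 */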
/*
* find the next ire_t entry in the ire_next chain starting at ire
* that is not CONDEMNED. ire is set to NULL if we reach the end of the list.
* Caller must hold the ire_bucket lock.
*/
#define	IRE_FIND_NEXT_ORIGIN(ire) {					\
	while ((ire) != NULL && ((ire)->ire_marks & IRE_MARK_CONDEMNED))\
		(ire) = (ire)->ire_next;				\
}
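/*
 * Illustrative usage sketch (not part of the original header): a bucket
 * walk that skips CONDEMNED entries while the caller holds the bucket
 * lock might look roughly like
 *
 *	ire = irb->irb_ire;
 *	IRE_FIND_NEXT_ORIGIN(ire);
 *	while (ire != NULL) {
 *		... examine ire ...
 *		ire = ire->ire_next;
 *		IRE_FIND_NEXT_ORIGIN(ire);
 *	}
 */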
/* Structure for ire_cache_count() */
typedef struct {
int icc_total; /* Total number of IRE_CACHE */
int icc_unused; /* # off/no PMTU unused since last reclaim */
int icc_offlink; /* # offlink without PMTU information */
int icc_pmtu; /* # offlink with PMTU information */
	int	icc_onlink;	/* # onlink */
} ire_cache_count_t;
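/*
 * Illustrative sketch (not part of the original header): ire_cache_count()
 * is intended as an ire walker callback that tallies into this structure,
 * e.g. roughly
 *
 *	ire_cache_count_t icc;
 *
 *	bzero(&icc, sizeof (icc));
 *	ire_walk(ire_cache_count, (char *)&icc);
 *
 * after which icc_total and friends describe the IRE_CACHE population.
 */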
/*
 * Structure for ire_cache_reclaim(). Each field is a fraction: 1 means
 * reclaim all, N means reclaim 1/Nth of all entries, and 0 means reclaim none.
*/
typedef struct {
int icr_unused; /* Fraction for unused since last reclaim */
int icr_offlink; /* Fraction for offlink without PMTU info */
int icr_pmtu; /* Fraction for offlink with PMTU info */
	int	icr_onlink;	/* Fraction for onlink */
} ire_cache_reclaim_t;
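/*
 * Illustrative example (not part of the original header), using the
 * fraction convention above:
 *
 *	icr.icr_unused = 1;	reclaim all unused entries
 *	icr.icr_offlink = 3;	reclaim about 1/3 of offlink/no-PMTU entries
 *	icr.icr_pmtu = 0;	reclaim no offlink entries with PMTU info
 *	icr.icr_onlink = 0;	reclaim no onlink entries
 */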
typedef struct {
	uint64_t	ire_stats_alloced;	/* # of ires alloced */
	uint64_t	ire_stats_freed;	/* # of ires freed */
	uint64_t	ire_stats_inserted;	/* # of ires inserted in the bucket */
	uint64_t	ire_stats_deleted;	/* # of ires deleted from the bucket */
} ire_stats_t;
extern ire_stats_t ire_stats_v4;
extern uint32_t ip_cache_table_size;
extern uint32_t ip6_cache_table_size;
extern irb_t *ip_cache_table;
extern uint32_t ip6_ftable_hash_size;
/*
 * We use atomics so that we get an accurate accounting of the ires.
 * Otherwise we can't detect leaks correctly.
*/
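/*
 * Hedged sketch (not necessarily the original macro): the atomic
 * accounting described above amounts to bumping 64-bit counters with
 * something like
 *
 *	#define	BUMP_IRE_STATS(ire_stats, x)	atomic_add_64(&(ire_stats).x, 1)
 *
 * e.g. BUMP_IRE_STATS(ire_stats_v4, ire_stats_inserted) when an ire is
 * added to a bucket.
 */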
extern irb_t *ip_forwarding_table_v6[];
extern irb_t *ip_cache_table_v6;
extern irb_t *ip_mrtun_table;
extern irb_t *ip_srcif_table;
extern kmutex_t ire_ft_init_lock;
extern kmutex_t ire_mrtun_lock;
extern kmutex_t ire_srcif_table_lock;
extern ire_stats_t ire_stats_v6;
extern uint_t ire_mrtun_count;
extern uint_t ire_srcif_table_count;
#ifdef _KERNEL
struct ts_label_s;
extern void ip_ire_clookup_and_delete_v6(const in6_addr_t *);
extern void ire_report_ftable(ire_t *, char *);
extern int ip_mask_to_plen(ipaddr_t);
extern int ip_mask_to_plen_v6(const in6_addr_t *);
extern void ire_cache_count(ire_t *, char *);
const struct ts_label_s *);
const struct ts_label_s *);
extern void ire_cache_reclaim(ire_t *, char *);
boolean_t *);
tsol_gc_t *, tsol_gcgrp_t *);
tsol_gc_t *, tsol_gcgrp_t *);
ire_t **, int);
tsol_gc_t *, tsol_gcgrp_t *);
tsol_gc_t *, tsol_gcgrp_t *);
tsol_gc_t *, tsol_gcgrp_t *);
zoneid_t, const struct ts_label_s *, int);
extern void ire_delete(ire_t *);
extern void ire_delete_cache_gw(ire_t *, char *);
extern void ire_delete_cache_gw_v6(ire_t *, char *);
extern void ire_delete_cache_v6(ire_t *, char *);
extern void ire_delete_srcif(ire_t *);
extern void ire_delete_v6(ire_t *);
extern void ire_expire(ire_t *, char *);
extern void ire_flush_cache_v4(ire_t *, int);
extern void ire_flush_cache_v6(ire_t *, int);
uint32_t, const struct ts_label_s *, int);
const struct ts_label_s *tsl);
extern void ire_refrele(ire_t *);
extern void ire_refrele_notr(ire_t *);
const struct ts_label_s *, int);
extern void ire_walk_srcif_table_v4(pfv_t, void *);
const struct ts_label_s *);
const struct ts_label_s *);
const struct ts_label_s *);
const struct ts_label_s *);
extern void ire_freemblk(ire_t *);
zoneid_t);
#endif /* _KERNEL */
#ifdef __cplusplus
}
#endif
#endif /* _INET_IP_IRE_H */