Diffstat (limited to 'usr/src/uts/common')
-rw-r--r--  usr/src/uts/common/io/aggr/aggr_grp.c          |   6
-rw-r--r--  usr/src/uts/common/io/mac/mac.c                |  95
-rw-r--r--  usr/src/uts/common/io/mac/mac_datapath_setup.c |  81
-rw-r--r--  usr/src/uts/common/io/nxge/nxge_hio.c          |  71
-rw-r--r--  usr/src/uts/common/io/nxge/nxge_hio_guest.c    | 116
-rw-r--r--  usr/src/uts/common/io/nxge/nxge_main.c         | 120
-rw-r--r--  usr/src/uts/common/io/nxge/nxge_rxdma.c        |  76
-rw-r--r--  usr/src/uts/common/io/nxge/nxge_send.c         |  72
-rw-r--r--  usr/src/uts/common/io/nxge/nxge_virtual.c      |   3
-rw-r--r--  usr/src/uts/common/sys/mac_client_priv.h       |   2
-rw-r--r--  usr/src/uts/common/sys/mac_impl.h              |   6
-rw-r--r--  usr/src/uts/common/sys/mac_soft_ring.h         |   3
-rw-r--r--  usr/src/uts/common/sys/nxge/nxge_hio.h         |  34
13 files changed, 291 insertions, 394 deletions
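
The headline interface change in this patch: mac_hwrings_get() gains a mac_ring_type_t argument so a MAC client can ask for either its RX group's rings or the TX rings now tracked on its Tx SRS (st_rings/st_ring_count), and the per-ring transmit routine moves from the private mac_ring_tx() to mac_hwring_tx(). The sketch below shows how a kernel MAC client might use the post-patch prototypes; it is illustrative only, assumes the client handle and surrounding module already exist, and is not code from this changeset.

#include <sys/mac_client_priv.h>	/* mac_hwrings_get() */
#include <sys/mac_impl.h>		/* mac_hwring_tx(), MAX_RINGS_PER_GROUP */

/*
 * Fetch the hardware TX rings backing a MAC client and transmit an
 * mblk chain on the first one.  Packets the ring cannot accept are
 * handed back to the caller, following the usual mri_tx() convention.
 */
static mblk_t *
example_hwring_tx(mac_client_handle_t mch, mblk_t *mp)
{
	mac_group_handle_t	hwgh = NULL;	/* only filled in for RX queries */
	mac_ring_handle_t	hwrh[MAX_RINGS_PER_GROUP];
	int			cnt;

	cnt = mac_hwrings_get(mch, &hwgh, hwrh, MAC_RING_TYPE_TX);
	if (cnt <= 0)
		return (mp);	/* no hardware TX rings reserved */

	return (mac_hwring_tx(hwrh[0], mp));
}

This is also why the guest-domain-only nxge_m_tx() hash path disappears below: an LDoms guest now registers its shared DMA channels as ordinary rings through MAC_CAPAB_RINGS and lets the MAC layer transmit on them directly.
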
diff --git a/usr/src/uts/common/io/aggr/aggr_grp.c b/usr/src/uts/common/io/aggr/aggr_grp.c index 8e080da083..c619144958 100644 --- a/usr/src/uts/common/io/aggr/aggr_grp.c +++ b/usr/src/uts/common/io/aggr/aggr_grp.c @@ -623,7 +623,8 @@ aggr_add_pseudo_rx_group(aggr_port_t *port, aggr_pseudo_rx_group_t *rx_grp) /* * Get the list the the underlying HW rings. */ - hw_rh_cnt = mac_hwrings_get(port->lp_mch, &port->lp_hwgh, hw_rh); + hw_rh_cnt = mac_hwrings_get(port->lp_mch, &port->lp_hwgh, hw_rh, + MAC_RING_TYPE_RX); if (port->lp_hwgh != NULL) { /* @@ -689,7 +690,8 @@ aggr_rem_pseudo_rx_group(aggr_port_t *port, aggr_pseudo_rx_group_t *rx_grp) goto done; ASSERT(rx_grp->arg_gh != NULL); - hw_rh_cnt = mac_hwrings_get(port->lp_mch, &hwgh, hw_rh); + hw_rh_cnt = mac_hwrings_get(port->lp_mch, &hwgh, hw_rh, + MAC_RING_TYPE_RX); /* * If hw_rh_cnt is 0, it means that the underlying port does not diff --git a/usr/src/uts/common/io/mac/mac.c b/usr/src/uts/common/io/mac/mac.c index 91d7aab331..21982219b9 100644 --- a/usr/src/uts/common/io/mac/mac.c +++ b/usr/src/uts/common/io/mac/mac.c @@ -1426,35 +1426,54 @@ mac_hwrings_rx_process(void *arg, mac_resource_handle_t srs, */ int mac_hwrings_get(mac_client_handle_t mch, mac_group_handle_t *hwgh, - mac_ring_handle_t *hwrh) + mac_ring_handle_t *hwrh, mac_ring_type_t rtype) { mac_client_impl_t *mcip = (mac_client_impl_t *)mch; - flow_entry_t *flent = mcip->mci_flent; - mac_group_t *grp = flent->fe_rx_ring_group; - mac_ring_t *ring; int cnt = 0; - /* - * The mac client did not reserve any RX group, return directly. - * This is probably because the underlying MAC does not support - * any RX groups. - */ - *hwgh = NULL; - if (grp == NULL) - return (0); + switch (rtype) { + case MAC_RING_TYPE_RX: { + flow_entry_t *flent = mcip->mci_flent; + mac_group_t *grp; + mac_ring_t *ring; - /* - * This RX group must be reserved by this mac client. - */ - ASSERT((grp->mrg_state == MAC_GROUP_STATE_RESERVED) && - (mch == (mac_client_handle_t)(MAC_RX_GROUP_ONLY_CLIENT(grp)))); + grp = flent->fe_rx_ring_group; + /* + * The mac client did not reserve any RX group, return directly. + * This is probably because the underlying MAC does not support + * any groups. + */ + *hwgh = NULL; + if (grp == NULL) + return (0); + /* + * This group must be reserved by this mac client. + */ + ASSERT((grp->mrg_state == MAC_GROUP_STATE_RESERVED) && + (mch == (mac_client_handle_t) + (MAC_RX_GROUP_ONLY_CLIENT(grp)))); + for (ring = grp->mrg_rings; + ring != NULL; ring = ring->mr_next, cnt++) { + ASSERT(cnt < MAX_RINGS_PER_GROUP); + hwrh[cnt] = (mac_ring_handle_t)ring; + } + *hwgh = (mac_group_handle_t)grp; + return (cnt); + } + case MAC_RING_TYPE_TX: { + mac_soft_ring_set_t *tx_srs; + mac_srs_tx_t *tx; - for (ring = grp->mrg_rings; ring != NULL; ring = ring->mr_next) { - ASSERT(cnt < MAX_RINGS_PER_GROUP); - hwrh[cnt++] = (mac_ring_handle_t)ring; + tx_srs = MCIP_TX_SRS(mcip); + tx = &tx_srs->srs_tx; + for (; cnt < tx->st_ring_count; cnt++) + hwrh[cnt] = tx->st_rings[cnt]; + return (cnt); + } + default: + ASSERT(B_FALSE); + return (-1); } - *hwgh = (mac_group_handle_t)grp; - return (cnt); } /* @@ -1524,6 +1543,22 @@ mac_hwring_poll(mac_ring_handle_t rh, int bytes_to_pickup) return (info->mri_poll(info->mri_driver, bytes_to_pickup)); } +/* + * Send packets through the selected tx ring. 
+ */ +mblk_t * +mac_hwring_tx(mac_ring_handle_t rh, mblk_t *mp) +{ + mac_ring_t *ring = (mac_ring_t *)rh; + mac_ring_info_t *info = &ring->mr_info; + + ASSERT(ring->mr_type == MAC_RING_TYPE_TX); + ASSERT(ring->mr_state >= MR_INUSE); + ASSERT(info->mri_tx != NULL); + + return (info->mri_tx(info->mri_driver, mp)); +} + int mac_hwgroup_addmac(mac_group_handle_t gh, const uint8_t *addr) { @@ -3429,22 +3464,6 @@ mac_release_tx_ring(mac_ring_handle_t rh) } /* - * Send packets through a selected tx ring. - */ -mblk_t * -mac_ring_tx(mac_ring_handle_t rh, mblk_t *mp) -{ - mac_ring_t *ring = (mac_ring_t *)rh; - mac_ring_info_t *info = &ring->mr_info; - - ASSERT(ring->mr_type == MAC_RING_TYPE_TX); - ASSERT(ring->mr_state >= MR_INUSE); - ASSERT(info->mri_tx != NULL); - - return (info->mri_tx(info->mri_driver, mp)); -} - -/* * Find a ring from its index. */ mac_ring_t * diff --git a/usr/src/uts/common/io/mac/mac_datapath_setup.c b/usr/src/uts/common/io/mac/mac_datapath_setup.c index 7b8c4c6567..dc5b51cb80 100644 --- a/usr/src/uts/common/io/mac/mac_datapath_setup.c +++ b/usr/src/uts/common/io/mac/mac_datapath_setup.c @@ -2235,6 +2235,10 @@ mac_srs_group_teardown(mac_client_impl_t *mcip, flow_entry_t *flent, tx->st_group); tx->st_group = NULL; } + if (tx->st_ring_count != 0) { + kmem_free(tx->st_rings, + sizeof (mac_ring_handle_t) * tx->st_ring_count); + } if (tx->st_arg2 != NULL) { ASSERT(tx_srs->srs_type & SRST_TX); mac_release_tx_ring(tx->st_arg2); @@ -3203,7 +3207,7 @@ mac_tx_srs_setup(mac_client_impl_t *mcip, flow_entry_t *flent, mac_impl_t *mip = mcip->mci_mip; mac_soft_ring_set_t *tx_srs; int i, tx_ring_count = 0, tx_rings_reserved = 0; - mac_ring_handle_t *tx_ring = NULL; + mac_ring_handle_t *tx_rings = NULL; uint32_t soft_ring_type; mac_group_t *grp = NULL; mac_ring_t *ring; @@ -3221,7 +3225,7 @@ mac_tx_srs_setup(mac_client_impl_t *mcip, flow_entry_t *flent, } if (tx_ring_count != 0) { - tx_ring = kmem_zalloc(sizeof (mac_ring_handle_t) * + tx_rings = kmem_zalloc(sizeof (mac_ring_handle_t) * tx_ring_count, KM_SLEEP); } @@ -3231,8 +3235,12 @@ mac_tx_srs_setup(mac_client_impl_t *mcip, flow_entry_t *flent, * NIC's. */ if (srs_type == SRST_FLOW || - (mcip->mci_state_flags & MCIS_NO_HWRINGS) != 0) - goto use_default_ring; + (mcip->mci_state_flags & MCIS_NO_HWRINGS) != 0) { + /* use default ring */ + tx_rings[0] = (void *)mip->mi_default_tx_ring; + tx_rings_reserved++; + goto rings_assigned; + } if (mcip->mci_share != NULL) ring = grp->mrg_rings; @@ -3245,8 +3253,7 @@ mac_tx_srs_setup(mac_client_impl_t *mcip, flow_entry_t *flent, * then each Tx ring will have a Tx-side soft ring. All * these soft rings will be hang off Tx SRS. */ - for (i = 0, tx_rings_reserved = 0; - i < tx_ring_count; i++, tx_rings_reserved++) { + for (i = 0; i < tx_ring_count; i++) { if (mcip->mci_share != NULL) { /* * The ring was already chosen and associated @@ -3255,42 +3262,39 @@ mac_tx_srs_setup(mac_client_impl_t *mcip, flow_entry_t *flent, * between the share and non-share cases. */ ASSERT(ring != NULL); - tx_ring[i] = (mac_ring_handle_t)ring; + tx_rings[i] = (mac_ring_handle_t)ring; ring = ring->mr_next; } else { - tx_ring[i] = + tx_rings[i] = (mac_ring_handle_t)mac_reserve_tx_ring(mip, NULL); - if (tx_ring[i] == NULL) + if (tx_rings[i] == NULL) { + /* + * We have run out of Tx rings. So + * give the default ring too. 
+ */ + tx_rings[i] = (void *)mip->mi_default_tx_ring; + tx_rings_reserved++; break; + } } + tx_rings_reserved++; } + +rings_assigned: if (mac_tx_serialize || (mip->mi_v12n_level & MAC_VIRT_SERIALIZE)) serialize = B_TRUE; /* * Did we get the requested number of tx rings? - * There are 3 actions we can take depending upon the number + * There are 2 actions we can take depending upon the number * of tx_rings we got. - * 1) If we got none, then hook up the tx_srs with the - * default ring. - * 2) If we got one, then get the tx_ring from the soft ring, + * 1) If we got one, then get the tx_ring from the soft ring, * save it in SRS and free up the soft ring. - * 3) If we got more than 1, then do the tx fanout among the + * 2) If we got more than 1, then do the tx fanout among the * rings we obtained. */ - switch (tx_rings_reserved) { - case 1: - /* - * No need to allocate Tx soft rings. Tx-side soft - * rings are for Tx fanout case. Just use Tx SRS. - */ - /* FALLTHRU */ - - case 0: -use_default_ring: - if (tx_rings_reserved == 0) - tx->st_arg2 = (void *)mip->mi_default_tx_ring; - else - tx->st_arg2 = (void *)tx_ring[0]; + ASSERT(tx_rings_reserved != 0); + if (tx_rings_reserved == 1) { + tx->st_arg2 = (void *)tx_rings[0]; /* For ring_count of 0 or 1, set the tx_mode and return */ if (tx_srs->srs_type & SRST_BW_CONTROL) tx->st_mode = SRS_TX_BW; @@ -3298,18 +3302,9 @@ use_default_ring: tx->st_mode = SRS_TX_SERIALIZE; else tx->st_mode = SRS_TX_DEFAULT; - break; - - default: + } else { /* * We got multiple Tx rings for Tx fanout. - * - * cpuid of -1 is passed. This creates an unbound - * worker thread. Instead the code should get CPU - * binding information and pass that to - * mac_soft_ring_create(). This needs to be done - * in conjunction with Rx-side soft ring - * bindings. 
*/ soft_ring_type = ST_RING_OTH | ST_RING_TX; if (tx_srs->srs_type & SRST_BW_CONTROL) { @@ -3322,7 +3317,7 @@ use_default_ring: for (i = 0; i < tx_rings_reserved; i++) { (void) mac_soft_ring_create(i, 0, NULL, soft_ring_type, maxclsyspri, mcip, tx_srs, -1, NULL, mcip, - (mac_resource_handle_t)tx_ring[i]); + (mac_resource_handle_t)tx_rings[i]); } mac_srs_update_fanout_list(tx_srs); } @@ -3332,8 +3327,12 @@ use_default_ring: int, tx->st_mode, int, tx_srs->srs_oth_ring_count); if (tx_ring_count != 0) { - kmem_free(tx_ring, - sizeof (mac_ring_handle_t) * tx_ring_count); + tx->st_ring_count = tx_rings_reserved; + tx->st_rings = kmem_zalloc(sizeof (mac_ring_handle_t) * + tx_rings_reserved, KM_SLEEP); + for (i = 0; i < tx->st_ring_count; i++) + tx->st_rings[i] = tx_rings[i]; + kmem_free(tx_rings, sizeof (mac_ring_handle_t) * tx_ring_count); } } diff --git a/usr/src/uts/common/io/nxge/nxge_hio.c b/usr/src/uts/common/io/nxge/nxge_hio.c index 827553301c..b58acde5e8 100644 --- a/usr/src/uts/common/io/nxge/nxge_hio.c +++ b/usr/src/uts/common/io/nxge/nxge_hio.c @@ -41,9 +41,6 @@ #include <sys/nxge/nxge_txdma.h> #include <sys/nxge/nxge_hio.h> -#define NXGE_HIO_SHARE_MIN_CHANNELS 2 -#define NXGE_HIO_SHARE_MAX_CHANNELS 2 - /* * External prototypes */ @@ -1057,23 +1054,6 @@ nxge_hio_init( NXGE_DEBUG_MSG((nxge, HIO_CTL, "Hybrid IO-capable service domain")); return (NXGE_OK); - } else { - /* - * isLDOMguest(nxge) == B_TRUE - */ - nx_vio_fp_t *vio; - nhd->type = NXGE_HIO_TYPE_GUEST; - - vio = &nhd->hio.vio; - vio->__register = (vio_net_resource_reg_t) - modgetsymvalue("vio_net_resource_reg", 0); - vio->unregister = (vio_net_resource_unreg_t) - modgetsymvalue("vio_net_resource_unreg", 0); - - if (vio->__register == 0 || vio->unregister == 0) { - NXGE_ERROR_MSG((nxge, VIR_CTL, "vio_net is absent!")); - return (NXGE_ERROR); - } } return (0); @@ -1144,12 +1124,16 @@ nxge_hio_clear_unicst(p_nxge_t nxgep, const uint8_t *mac_addr) static int nxge_hio_add_mac(void *arg, const uint8_t *mac_addr) { - nxge_ring_group_t *group = (nxge_ring_group_t *)arg; - p_nxge_t nxge = group->nxgep; - int rv; - nxge_hio_vr_t *vr; /* The Virtualization Region */ + nxge_ring_group_t *group = (nxge_ring_group_t *)arg; + p_nxge_t nxge = group->nxgep; + int rv; + nxge_hio_vr_t *vr; /* The Virtualization Region */ ASSERT(group->type == MAC_RING_TYPE_RX); + ASSERT(group->nxgep != NULL); + + if (isLDOMguest(group->nxgep)) + return (0); mutex_enter(nxge->genlock); @@ -1174,8 +1158,7 @@ nxge_hio_add_mac(void *arg, const uint8_t *mac_addr) /* * Program the mac address for the group. 
*/ - if ((rv = nxge_hio_group_mac_add(nxge, group, - mac_addr)) != 0) { + if ((rv = nxge_hio_group_mac_add(nxge, group, mac_addr)) != 0) { return (rv); } @@ -1206,6 +1189,10 @@ nxge_hio_rem_mac(void *arg, const uint8_t *mac_addr) int rv, slot; ASSERT(group->type == MAC_RING_TYPE_RX); + ASSERT(group->nxgep != NULL); + + if (isLDOMguest(group->nxgep)) + return (0); mutex_enter(nxge->genlock); @@ -1253,14 +1240,16 @@ nxge_hio_group_start(mac_group_driver_t gdriver) int dev_gindex; ASSERT(group->type == MAC_RING_TYPE_RX); + ASSERT(group->nxgep != NULL); -#ifdef later ASSERT(group->nxgep->nxge_mac_state == NXGE_MAC_STARTED); -#endif if (group->nxgep->nxge_mac_state != NXGE_MAC_STARTED) return (ENXIO); mutex_enter(group->nxgep->genlock); + if (isLDOMguest(group->nxgep)) + goto nxge_hio_group_start_exit; + dev_gindex = group->nxgep->pt_config.hw_config.def_mac_rxdma_grpid + group->gindex; rdc_grp_p = &group->nxgep->pt_config.rdc_grps[dev_gindex]; @@ -1289,9 +1278,9 @@ nxge_hio_group_start(mac_group_driver_t gdriver) (void) nxge_init_fzc_rdc_tbl(group->nxgep, rdc_grp_p, rdctbl); +nxge_hio_group_start_exit: group->started = B_TRUE; mutex_exit(group->nxgep->genlock); - return (0); } @@ -1305,6 +1294,9 @@ nxge_hio_group_stop(mac_group_driver_t gdriver) mutex_enter(group->nxgep->genlock); group->started = B_FALSE; + if (isLDOMguest(group->nxgep)) + goto nxge_hio_group_stop_exit; + /* * Unbind the RDC table previously bound for this group. * @@ -1314,6 +1306,7 @@ nxge_hio_group_stop(mac_group_driver_t gdriver) if (group->gindex != 0) (void) nxge_fzc_rdc_tbl_unbind(group->nxgep, group->rdctbl); +nxge_hio_group_stop_exit: mutex_exit(group->nxgep->genlock); } @@ -1334,20 +1327,26 @@ nxge_hio_group_get(void *arg, mac_ring_type_t type, int groupid, group->gindex = groupid; group->sindex = 0; /* not yet bound to a share */ - dev_gindex = nxgep->pt_config.hw_config.def_mac_rxdma_grpid + - groupid; + if (!isLDOMguest(nxgep)) { + dev_gindex = + nxgep->pt_config.hw_config.def_mac_rxdma_grpid + + groupid; - if (nxgep->pt_config.hw_config.def_mac_rxdma_grpid == - dev_gindex) - group->port_default_grp = B_TRUE; + if (nxgep->pt_config.hw_config.def_mac_rxdma_grpid == + dev_gindex) + group->port_default_grp = B_TRUE; + + infop->mgi_count = + nxgep->pt_config.rdc_grps[dev_gindex].max_rdcs; + } else { + infop->mgi_count = NXGE_HIO_SHARE_MAX_CHANNELS; + } infop->mgi_driver = (mac_group_driver_t)group; infop->mgi_start = nxge_hio_group_start; infop->mgi_stop = nxge_hio_group_stop; infop->mgi_addmac = nxge_hio_add_mac; infop->mgi_remmac = nxge_hio_rem_mac; - infop->mgi_count = - nxgep->pt_config.rdc_grps[dev_gindex].max_rdcs; break; case MAC_RING_TYPE_TX: diff --git a/usr/src/uts/common/io/nxge/nxge_hio_guest.c b/usr/src/uts/common/io/nxge/nxge_hio_guest.c index eb05298299..78c1bb53a6 100644 --- a/usr/src/uts/common/io/nxge/nxge_hio_guest.c +++ b/usr/src/uts/common/io/nxge/nxge_hio_guest.c @@ -35,46 +35,9 @@ #include <sys/nxge/nxge_fzc.h> #include <sys/nxge/nxge_rxdma.h> #include <sys/nxge/nxge_txdma.h> - #include <sys/nxge/nxge_hio.h> /* - * nxge_hio_unregister - * - * Unregister with the VNET module. - * - * Arguments: - * nxge - * - * Notes: - * We must uninitialize all DMA channels associated with the VR, too. - * - * We're assuming that the channels will be disabled & unassigned - * in the service domain, after we're done here. 
- * - * Context: - * Guest domain - */ -void -nxge_hio_unregister( - nxge_t *nxge) -{ - nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; - - if (nhd == 0) { - return; - } - -#if defined(sun4v) - /* Unregister with vNet. */ - if (nhd->hio.vio.unregister) { - if (nxge->hio_vr) - (*nhd->hio.vio.unregister)(nxge->hio_vr->vhp); - } -#endif -} - -/* * nxge_guest_regs_map * * Map in a guest domain's register set(s). @@ -95,8 +58,7 @@ static ddi_device_acc_attr_t nxge_guest_register_access_attributes = { }; int -nxge_guest_regs_map( - nxge_t *nxge) +nxge_guest_regs_map(nxge_t *nxge) { dev_regs_t *regs; off_t regsize; @@ -211,31 +173,22 @@ static void nxge_check_guest_state(nxge_hio_vr_t *); int nxge_hio_vr_add(nxge_t *nxge) { - extern mac_callbacks_t nxge_m_callbacks; - - nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; - nxge_hio_vr_t *vr; - nxge_hio_dc_t *dc; - - int *reg_val; - uint_t reg_len; - uint8_t vr_index; - - nxhv_vr_fp_t *fp; - uint64_t vr_address, vr_size; - uint32_t cookie; + extern nxge_status_t nxge_mac_register(p_nxge_t); - nxhv_dc_fp_t *tx, *rx; - uint64_t tx_map, rx_map; - - uint64_t hv_rv; - - /* Variables needed to register with vnet. */ - mac_register_t *mac_info; - ether_addr_t mac_addr; - nx_vio_fp_t *vio; - - int i; + nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; + nxge_hio_vr_t *vr; + nxge_hio_dc_t *dc; + int *reg_val; + uint_t reg_len; + uint8_t vr_index; + nxhv_vr_fp_t *fp; + uint64_t vr_address, vr_size; + uint32_t cookie; + nxhv_dc_fp_t *tx, *rx; + uint64_t tx_map, rx_map; + uint64_t hv_rv; + int i; + nxge_status_t status; NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_add")); @@ -384,40 +337,13 @@ nxge_hio_vr_add(nxge_t *nxge) } } - /* - * Register with vnet. - */ - if ((mac_info = mac_alloc(MAC_VERSION)) == NULL) - return (NXGE_ERROR); - - mac_info->m_type_ident = MAC_PLUGIN_IDENT_ETHER; - mac_info->m_driver = nxge; - mac_info->m_dip = nxge->dip; - mac_info->m_src_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP); - mac_info->m_dst_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP); - (void) memset(mac_info->m_src_addr, 0xff, sizeof (MAXMACADDRLEN)); - mac_info->m_callbacks = &nxge_m_callbacks; - mac_info->m_min_sdu = 0; - mac_info->m_max_sdu = NXGE_MTU_DEFAULT_MAX - - sizeof (struct ether_header) - ETHERFCSL - 4; - - (void) memset(&mac_addr, 0xff, sizeof (mac_addr)); - - /* Register with vio_net. */ - vio = &nhd->hio.vio; - if ((*vio->__register)(mac_info, VIO_NET_RES_HYBRID, - nxge->hio_mac_addr, mac_addr, &vr->vhp, &vio->cb)) { - NXGE_DEBUG_MSG((nxge, HIO_CTL, "HIO registration() failed")); - KMEM_FREE(mac_info->m_src_addr, MAXMACADDRLEN); - KMEM_FREE(mac_info->m_dst_addr, MAXMACADDRLEN); - mac_free(mac_info); - return (NXGE_ERROR); + status = nxge_mac_register(nxge); + if (status != NXGE_OK) { + cmn_err(CE_WARN, "nxge(%d): nxge_mac_register failed\n", + nxge->instance); + return (status); } - KMEM_FREE(mac_info->m_src_addr, MAXMACADDRLEN); - KMEM_FREE(mac_info->m_dst_addr, MAXMACADDRLEN); - mac_free(mac_info); - nxge->hio_vr = vr; /* For faster lookups. 
*/ NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_add")); diff --git a/usr/src/uts/common/io/nxge/nxge_main.c b/usr/src/uts/common/io/nxge/nxge_main.c index a1ab453851..c0020bdac4 100644 --- a/usr/src/uts/common/io/nxge/nxge_main.c +++ b/usr/src/uts/common/io/nxge/nxge_main.c @@ -272,14 +272,11 @@ static void nxge_m_stop(void *); static int nxge_m_multicst(void *, boolean_t, const uint8_t *); static int nxge_m_promisc(void *, boolean_t); static void nxge_m_ioctl(void *, queue_t *, mblk_t *); -static nxge_status_t nxge_mac_register(p_nxge_t); +nxge_status_t nxge_mac_register(p_nxge_t); static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr, int slot, int rdctbl, boolean_t usetbl); void nxge_mmac_kstat_update(p_nxge_t nxgep, int slot, boolean_t factory); -#if defined(sun4v) -extern mblk_t *nxge_m_tx(void *arg, mblk_t *mp); -#endif static void nxge_m_getfactaddr(void *, uint_t, uint8_t *); static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *); @@ -630,11 +627,6 @@ nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) if (nxgep->niu_type != N2_NIU) { nxge_set_pci_replay_timeout(nxgep); } -#if defined(sun4v) - if (isLDOMguest(nxgep)) { - nxge_m_callbacks.mc_tx = nxge_m_tx; - } -#endif #if defined(sun4v) /* This is required by nxge_hio_init(), which follows. */ @@ -961,11 +953,7 @@ nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); - if (isLDOMguest(nxgep)) { - if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) - nxge_m_stop((void *)nxgep); - nxge_hio_unregister(nxgep); - } else if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) { + if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) { NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_detach status = 0x%08X", status)); return (DDI_FAILURE); @@ -4294,10 +4282,13 @@ nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) case MAC_CAPAB_MULTIFACTADDR: { mac_capab_multifactaddr_t *mfacp = cap_data; - mutex_enter(nxgep->genlock); - mfacp->mcm_naddr = nxgep->nxge_mmac_info.num_factory_mmac; - mfacp->mcm_getaddr = nxge_m_getfactaddr; - mutex_exit(nxgep->genlock); + if (!isLDOMguest(nxgep)) { + mutex_enter(nxgep->genlock); + mfacp->mcm_naddr = + nxgep->nxge_mmac_info.num_factory_mmac; + mfacp->mcm_getaddr = nxge_m_getfactaddr; + mutex_exit(nxgep->genlock); + } break; } @@ -4325,34 +4316,68 @@ nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) mutex_enter(nxgep->genlock); if (cap_rings->mr_type == MAC_RING_TYPE_RX) { - cap_rings->mr_group_type = MAC_GROUP_TYPE_DYNAMIC; - cap_rings->mr_rnum = p_cfgp->max_rdcs; - cap_rings->mr_rget = nxge_fill_ring; - cap_rings->mr_gnum = p_cfgp->max_rdc_grpids; - cap_rings->mr_gget = nxge_hio_group_get; - cap_rings->mr_gaddring = nxge_group_add_ring; - cap_rings->mr_gremring = nxge_group_rem_ring; + if (isLDOMguest(nxgep)) { + cap_rings->mr_group_type = + MAC_GROUP_TYPE_STATIC; + cap_rings->mr_rnum = + NXGE_HIO_SHARE_MAX_CHANNELS; + cap_rings->mr_rget = nxge_fill_ring; + cap_rings->mr_gnum = 1; + cap_rings->mr_gget = nxge_hio_group_get; + cap_rings->mr_gaddring = NULL; + cap_rings->mr_gremring = NULL; + } else { + /* + * Service Domain. 
+ */ + cap_rings->mr_group_type = + MAC_GROUP_TYPE_DYNAMIC; + cap_rings->mr_rnum = p_cfgp->max_rdcs; + cap_rings->mr_rget = nxge_fill_ring; + cap_rings->mr_gnum = p_cfgp->max_rdc_grpids; + cap_rings->mr_gget = nxge_hio_group_get; + cap_rings->mr_gaddring = nxge_group_add_ring; + cap_rings->mr_gremring = nxge_group_rem_ring; + } NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_getcapab: rx nrings[%d] ngroups[%d]", p_cfgp->max_rdcs, p_cfgp->max_rdc_grpids)); } else { - cap_rings->mr_group_type = MAC_GROUP_TYPE_DYNAMIC; - cap_rings->mr_rnum = p_cfgp->tdc.count; - cap_rings->mr_rget = nxge_fill_ring; - if (isLDOMservice(nxgep)) { - /* share capable */ - /* Do not report the default ring: hence -1 */ + /* + * TX Rings. + */ + if (isLDOMguest(nxgep)) { + cap_rings->mr_group_type = + MAC_GROUP_TYPE_STATIC; + cap_rings->mr_rnum = + NXGE_HIO_SHARE_MAX_CHANNELS; + cap_rings->mr_rget = nxge_fill_ring; + cap_rings->mr_gnum = 0; + cap_rings->mr_gget = NULL; + cap_rings->mr_gaddring = NULL; + cap_rings->mr_gremring = NULL; + } else { + /* + * Service Domain. + */ + cap_rings->mr_group_type = + MAC_GROUP_TYPE_DYNAMIC; + cap_rings->mr_rnum = p_cfgp->tdc.count; + cap_rings->mr_rget = nxge_fill_ring; + + /* + * Share capable. + * + * Do not report the default group: hence -1 + */ cap_rings->mr_gnum = NXGE_MAX_TDC_GROUPS / nxgep->nports - 1; - } else { - cap_rings->mr_gnum = 0; + cap_rings->mr_gget = nxge_hio_group_get; + cap_rings->mr_gaddring = nxge_group_add_ring; + cap_rings->mr_gremring = nxge_group_rem_ring; } - cap_rings->mr_gget = nxge_hio_group_get; - cap_rings->mr_gaddring = nxge_group_add_ring; - cap_rings->mr_gremring = nxge_group_rem_ring; - NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_m_getcapab: tx rings # of rings %d", p_cfgp->tdc.count)); @@ -6372,7 +6397,7 @@ nxge_intrs_disable(p_nxge_t nxgep) NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable")); } -static nxge_status_t +nxge_status_t nxge_mac_register(p_nxge_t nxgep) { mac_register_t *macp; @@ -6386,7 +6411,13 @@ nxge_mac_register(p_nxge_t nxgep) macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; macp->m_driver = nxgep; macp->m_dip = nxgep->dip; - macp->m_src_addr = nxgep->ouraddr.ether_addr_octet; + if (!isLDOMguest(nxgep)) { + macp->m_src_addr = nxgep->ouraddr.ether_addr_octet; + } else { + macp->m_src_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP); + macp->m_dst_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP); + (void) memset(macp->m_src_addr, 0xff, sizeof (MAXMACADDRLEN)); + } macp->m_callbacks = &nxge_m_callbacks; macp->m_min_sdu = 0; nxgep->mac.default_mtu = nxgep->mac.maxframesize - @@ -6395,7 +6426,12 @@ nxge_mac_register(p_nxge_t nxgep) macp->m_margin = VLAN_TAGSZ; macp->m_priv_props = nxge_priv_props; macp->m_priv_prop_count = NXGE_MAX_PRIV_PROPS; - macp->m_v12n = MAC_VIRT_HIO | MAC_VIRT_LEVEL1 | MAC_VIRT_SERIALIZE; + if (isLDOMguest(nxgep)) { + macp->m_v12n = MAC_VIRT_LEVEL1 | MAC_VIRT_SERIALIZE; + } else { + macp->m_v12n = MAC_VIRT_HIO | MAC_VIRT_LEVEL1 | \ + MAC_VIRT_SERIALIZE; + } NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_mac_register: instance %d " @@ -6406,6 +6442,10 @@ nxge_mac_register(p_nxge_t nxgep) NXGE_EHEADER_VLAN_CRC)); status = mac_register(macp, &nxgep->mach); + if (isLDOMguest(nxgep)) { + KMEM_FREE(macp->m_src_addr, MAXMACADDRLEN); + KMEM_FREE(macp->m_dst_addr, MAXMACADDRLEN); + } mac_free(macp); if (status != 0) { diff --git a/usr/src/uts/common/io/nxge/nxge_rxdma.c b/usr/src/uts/common/io/nxge/nxge_rxdma.c index 313e76c8f0..4b427d1a8d 100644 --- a/usr/src/uts/common/io/nxge/nxge_rxdma.c +++ 
b/usr/src/uts/common/io/nxge/nxge_rxdma.c @@ -1756,7 +1756,7 @@ nxge_rx_intr(void *arg1, void *arg2) uint8_t channel; npi_handle_t handle; rx_dma_ctl_stat_t cs; - p_rx_rcr_ring_t rcr_ring; + p_rx_rcr_ring_t rcrp; mblk_t *mp = NULL; if (ldvp == NULL) { @@ -1789,7 +1789,7 @@ nxge_rx_intr(void *arg1, void *arg2) /* * Get the ring to enable us to process packets. */ - rcr_ring = nxgep->rx_rcr_rings->rcr_rings[ldvp->vdma_index]; + rcrp = nxgep->rx_rcr_rings->rcr_rings[ldvp->vdma_index]; /* * The RCR ring lock must be held when packets @@ -1799,7 +1799,7 @@ nxge_rx_intr(void *arg1, void *arg2) * (will cause fatal errors such as rcrincon bit set) * and the setting of the poll_flag. */ - MUTEX_ENTER(&rcr_ring->lock); + MUTEX_ENTER(&rcrp->lock); /* * Get the control and status for this channel. @@ -1840,12 +1840,12 @@ nxge_rx_intr(void *arg1, void *arg2) mgm.value); } } - MUTEX_EXIT(&rcr_ring->lock); + MUTEX_EXIT(&rcrp->lock); return (DDI_INTR_CLAIMED); } - ASSERT(rcr_ring->ldgp == ldgp); - ASSERT(rcr_ring->ldvp == ldvp); + ASSERT(rcrp->ldgp == ldgp); + ASSERT(rcrp->ldvp == ldvp); RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value); @@ -1856,8 +1856,8 @@ nxge_rx_intr(void *arg1, void *arg2) cs.bits.hdw.rcrto, cs.bits.hdw.rcrthres)); - if (rcr_ring->poll_flag == 0) { - mp = nxge_rx_pkts(nxgep, rcr_ring, cs, -1); + if (!rcrp->poll_flag) { + mp = nxge_rx_pkts(nxgep, rcrp, cs, -1); } /* error events. */ @@ -1873,27 +1873,34 @@ nxge_rx_intr(void *arg1, void *arg2) * these two edge triggered bits. */ cs.value &= RX_DMA_CTL_STAT_WR1C; - cs.bits.hdw.mex = rcr_ring->poll_flag ? 0 : 1; + cs.bits.hdw.mex = rcrp->poll_flag ? 0 : 1; RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, cs.value); /* * If the polling mode is enabled, disable the interrupt. */ - if (rcr_ring->poll_flag) { + if (rcrp->poll_flag) { NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_rx_intr: rdc %d ldgp $%p ldvp $%p " "(disabling interrupts)", channel, ldgp, ldvp)); + /* * Disarm this logical group if this is a single device * group. 
*/ if (ldgp->nldvs == 1) { - ldgimgm_t mgm; - mgm.value = 0; - mgm.bits.ldw.arm = 0; - NXGE_REG_WR64(handle, - LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), mgm.value); + if (isLDOMguest(nxgep)) { + ldgp->arm = B_FALSE; + nxge_hio_ldgimgn(nxgep, ldgp); + } else { + ldgimgm_t mgm; + mgm.value = 0; + mgm.bits.ldw.arm = 0; + NXGE_REG_WR64(handle, + LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), + mgm.value); + } } } else { /* @@ -1920,24 +1927,11 @@ nxge_rx_intr(void *arg1, void *arg2) "==> nxge_rx_intr: rdc %d ldgp $%p " "exiting ISR (and call mac_rx_ring)", channel, ldgp)); } - MUTEX_EXIT(&rcr_ring->lock); + MUTEX_EXIT(&rcrp->lock); if (mp != NULL) { - if (!isLDOMguest(nxgep)) - mac_rx_ring(nxgep->mach, rcr_ring->rcr_mac_handle, mp, - rcr_ring->rcr_gen_num); -#if defined(sun4v) - else { /* isLDOMguest(nxgep) */ - nxge_hio_data_t *nhd = (nxge_hio_data_t *) - nxgep->nxge_hw_p->hio; - nx_vio_fp_t *vio = &nhd->hio.vio; - - if (vio->cb.vio_net_rx_cb) { - (*vio->cb.vio_net_rx_cb) - (nxgep->hio_vr->vhp, mp); - } - } -#endif + mac_rx_ring(nxgep->mach, rcrp->rcr_mac_handle, mp, + rcrp->rcr_gen_num); } NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: DDI_INTR_CLAIMED")); return (DDI_INTR_CLAIMED); @@ -2720,6 +2714,7 @@ nxge_enable_poll(void *arg) uint32_t channel; if (ring_handle == NULL) { + ASSERT(ring_handle != NULL); return (0); } @@ -2760,6 +2755,7 @@ nxge_disable_poll(void *arg) uint32_t channel; if (ring_handle == NULL) { + ASSERT(ring_handle != NULL); return (0); } @@ -2816,12 +2812,18 @@ nxge_disable_poll(void *arg) "==> nxge_disable_poll: rdc %d ldgp $%p (enable intr)", ringp->rdc, ldgp)); if (ldgp->nldvs == 1) { - ldgimgm_t mgm; - mgm.value = 0; - mgm.bits.ldw.arm = 1; - mgm.bits.ldw.timer = ldgp->ldg_timer; - NXGE_REG_WR64(handle, - LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), mgm.value); + if (isLDOMguest(nxgep)) { + ldgp->arm = B_TRUE; + nxge_hio_ldgimgn(nxgep, ldgp); + } else { + ldgimgm_t mgm; + mgm.value = 0; + mgm.bits.ldw.arm = 1; + mgm.bits.ldw.timer = ldgp->ldg_timer; + NXGE_REG_WR64(handle, + LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), + mgm.value); + } } ringp->poll_flag = 0; } diff --git a/usr/src/uts/common/io/nxge/nxge_send.c b/usr/src/uts/common/io/nxge/nxge_send.c index 16ce76ccad..4f7edf292a 100644 --- a/usr/src/uts/common/io/nxge/nxge_send.c +++ b/usr/src/uts/common/io/nxge/nxge_send.c @@ -66,20 +66,9 @@ nxge_tx_ring_task(void *arg) (void) nxge_txdma_reclaim(ring->nxgep, ring, 0); MUTEX_EXIT(&ring->lock); - if (!isLDOMguest(ring->nxgep) && !ring->tx_ring_offline) + if (!ring->tx_ring_offline) { mac_tx_ring_update(ring->nxgep->mach, ring->tx_ring_handle); -#if defined(sun4v) - else { - nxge_hio_data_t *nhd = - (nxge_hio_data_t *)ring->nxgep->nxge_hw_p->hio; - nx_vio_fp_t *vio = &nhd->hio.vio; - - /* Call back vnet. */ - if (vio->cb.vio_net_tx_update) { - (*vio->cb.vio_net_tx_update)(ring->nxgep->hio_vr->vhp); - } } -#endif } static void @@ -141,65 +130,6 @@ nxge_tx_ring_send(void *arg, mblk_t *mp) return ((mblk_t *)NULL); } -#if defined(sun4v) - -/* - * Hashing policy for load balancing over the set of TX rings - * available to the driver. - */ -static uint8_t nxge_tx_hash_policy = MAC_PKT_HASH_L4; - -/* - * nxge_m_tx() is needed for Hybrid I/O operation of the vnet in - * the guest domain. See CR 6778758 for long term solution. - * - * The guest domain driver will for now hash the packet - * to pick a DMA channel from the only group it has group 0. 
- */ - -mblk_t * -nxge_m_tx(void *arg, mblk_t *mp) -{ - p_nxge_t nxgep = (p_nxge_t)arg; - mblk_t *next; - uint64_t rindex; - p_tx_ring_t tx_ring_p; - int status; - - NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_m_tx")); - - /* - * Hash to pick a ring from Group 0, the only TX group - * for a guest domain driver. - */ - rindex = mac_pkt_hash(DL_ETHER, mp, nxge_tx_hash_policy, B_TRUE); - rindex = rindex % nxgep->pt_config.tdc_grps[0].max_tdcs; - - /* - * Get the ring handle. - */ - tx_ring_p = nxgep->tx_rings->rings[rindex]; - - while (mp != NULL) { - next = mp->b_next; - mp->b_next = NULL; - - status = nxge_start(nxgep, tx_ring_p, mp); - if (status != 0) { - mp->b_next = next; - nxge_tx_ring_dispatch(tx_ring_p); - return (mp); - } - - mp = next; - } - - NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_m_tx")); - return ((mblk_t *)NULL); -} - -#endif - int nxge_start(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, p_mblk_t mp) { diff --git a/usr/src/uts/common/io/nxge/nxge_virtual.c b/usr/src/uts/common/io/nxge/nxge_virtual.c index ff78d828d6..c0468f8fed 100644 --- a/usr/src/uts/common/io/nxge/nxge_virtual.c +++ b/usr/src/uts/common/io/nxge/nxge_virtual.c @@ -3994,6 +3994,9 @@ nxge_get_rxring_index(p_nxge_t nxgep, int groupid, int ringidx) p_dma_cfgp = &nxgep->pt_config; p_cfgp = &p_dma_cfgp->hw_config; + if (isLDOMguest(nxgep)) + return (ringidx); + for (i = 0; i < groupid; i++) { rdc_grp_p = &p_dma_cfgp->rdc_grps[p_cfgp->def_mac_rxdma_grpid + i]; diff --git a/usr/src/uts/common/sys/mac_client_priv.h b/usr/src/uts/common/sys/mac_client_priv.h index 4acc126a8f..6174dd1a72 100644 --- a/usr/src/uts/common/sys/mac_client_priv.h +++ b/usr/src/uts/common/sys/mac_client_priv.h @@ -120,7 +120,7 @@ extern void mac_rx_client_quiesce(mac_client_handle_t); extern void mac_rx_client_restart(mac_client_handle_t); extern void mac_srs_perm_quiesce(mac_client_handle_t, boolean_t); extern int mac_hwrings_get(mac_client_handle_t, mac_group_handle_t *, - mac_ring_handle_t *); + mac_ring_handle_t *, mac_ring_type_t); extern void mac_hwring_setup(mac_ring_handle_t, mac_resource_handle_t); extern void mac_hwring_teardown(mac_ring_handle_t); extern int mac_hwring_disable_intr(mac_ring_handle_t); diff --git a/usr/src/uts/common/sys/mac_impl.h b/usr/src/uts/common/sys/mac_impl.h index 3c2e30f37e..a93335606f 100644 --- a/usr/src/uts/common/sys/mac_impl.h +++ b/usr/src/uts/common/sys/mac_impl.h @@ -262,7 +262,7 @@ struct mac_group_s { #define MAC_RING_TX_DEFAULT(mip, mp) \ ((mip->mi_default_tx_ring == NULL) ? 
\ mip->mi_tx(mip->mi_driver, mp) : \ - mac_ring_tx(mip->mi_default_tx_ring, mp)) + mac_hwring_tx(mip->mi_default_tx_ring, mp)) #define MAC_TX(mip, ring, mp, mcip) { \ /* \ @@ -275,7 +275,7 @@ struct mac_group_s { (ring == NULL)) \ mp = MAC_RING_TX_DEFAULT(mip, mp); \ else \ - mp = mac_ring_tx(ring, mp); \ + mp = mac_hwring_tx(ring, mp); \ } /* mci_tx_flag */ @@ -585,7 +585,7 @@ extern int mac_group_addmac(mac_group_t *, const uint8_t *); extern int mac_group_remmac(mac_group_t *, const uint8_t *); extern int mac_rx_group_add_flow(mac_client_impl_t *, flow_entry_t *, mac_group_t *); -extern mblk_t *mac_ring_tx(mac_ring_handle_t, mblk_t *); +extern mblk_t *mac_hwring_tx(mac_ring_handle_t, mblk_t *); extern mac_ring_t *mac_reserve_tx_ring(mac_impl_t *, mac_ring_t *); extern void mac_release_tx_ring(mac_ring_handle_t); extern mac_group_t *mac_reserve_tx_group(mac_impl_t *, mac_share_handle_t); diff --git a/usr/src/uts/common/sys/mac_soft_ring.h b/usr/src/uts/common/sys/mac_soft_ring.h index 4973b84215..4b07fb4e9f 100644 --- a/usr/src/uts/common/sys/mac_soft_ring.h +++ b/usr/src/uts/common/sys/mac_soft_ring.h @@ -131,6 +131,9 @@ typedef struct mac_srs_tx_s { void *st_arg1; void *st_arg2; mac_group_t *st_group; /* TX group for share */ + uint32_t st_ring_count; /* no. of tx rings */ + mac_ring_handle_t *st_rings; + boolean_t st_woken_up; /* diff --git a/usr/src/uts/common/sys/nxge/nxge_hio.h b/usr/src/uts/common/sys/nxge/nxge_hio.h index d57a5424eb..b18f32e346 100644 --- a/usr/src/uts/common/sys/nxge/nxge_hio.h +++ b/usr/src/uts/common/sys/nxge/nxge_hio.h @@ -20,7 +20,7 @@ */ /* - * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Copyright 2009 Sun Microsystems, Inc. All rights reserved. * Use is subject to license terms. */ @@ -35,9 +35,6 @@ extern "C" { #include <nxge_ipp.h> #include <nxge_fflp.h> #include <sys/mac_provider.h> -#if defined(sun4v) -#include <sys/vnet_res.h> -#endif #define isLDOMservice(nxge) \ (nxge->environs == SOLARIS_SERVICE_DOMAIN) @@ -46,6 +43,9 @@ extern "C" { #define isLDOMs(nxge) \ (isLDOMservice(nxge) || isLDOMguest(nxge)) +#define NXGE_HIO_SHARE_MIN_CHANNELS 2 +#define NXGE_HIO_SHARE_MAX_CHANNELS 2 + /* ------------------------------------------------------------------ */ typedef uint8_t nx_rdc_t; typedef uint8_t nx_tdc_t; @@ -88,37 +88,19 @@ typedef struct { dc_getinfo getinfo; } nxhv_dc_fp_t; -#if defined(sun4v) -typedef struct { - vio_net_resource_reg_t __register; - vio_net_resource_unreg_t unregister; - - vio_net_callbacks_t cb; - -} nx_vio_fp_t; -#endif - typedef struct { boolean_t ldoms; - nxhv_vr_fp_t vr; nxhv_dc_fp_t tx; nxhv_dc_fp_t rx; - -#if defined(sun4v) - nx_vio_fp_t vio; -#endif - } nxhv_fp_t; /* ------------------------------------------------------------------ */ #define NXGE_VR_SR_MAX 8 /* There are 8 subregions (SR). */ typedef enum { - NXGE_HIO_TYPE_SERVICE, /* We are a service domain driver. */ NXGE_HIO_TYPE_GUEST /* We are a guest domain driver. 
*/ - } nxge_hio_type_t; typedef enum { @@ -130,7 +112,6 @@ typedef enum { FUNC2_VIR = 0x5000000, FUNC3_MNT = 0x6000000, FUNC3_VIR = 0x7000000 - } vr_base_address_t; #define VR_STEP 0x2000000 @@ -146,7 +127,6 @@ typedef enum { /* 0-8 */ FUNC3_VIR0, FUNC3_VIR1, FUNC_VIR_MAX - } vr_region_t; typedef enum { @@ -159,13 +139,11 @@ typedef enum { VP_CHANNEL_6, VP_CHANNEL_7, VP_CHANNEL_MAX - } vp_channel_t; typedef enum { VP_BOUND_TX = 1, VP_BOUND_RX - } vpc_type_t; #define VP_VC_OFFSET(channel) (channel << 10) @@ -254,9 +232,6 @@ typedef struct nxge_hio_vr { ether_addr_t altmac; /* The alternate MAC address. */ int slot; /* According to nxge_m_mmac_add(). */ -#if defined(sun4v) - vio_net_handle_t vhp; /* The handle given to us by the vnet. */ -#endif nxge_grp_t rx_group; nxge_grp_t tx_group; @@ -273,7 +248,6 @@ typedef struct { uint64_t map; /* Currently unused */ int vector; /* The DDI vector number (index) */ - } hio_ldg_t; /* |
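
For reference, the Tx SRS bookkeeping that several hunks above depend on (mac_tx_srs_setup() filling in st_rings/st_ring_count, mac_srs_group_teardown() releasing them, mac_hwrings_get() reading them) boils down to the pattern sketched here. The helper names are invented for illustration; only the mac_srs_tx_t fields and the kmem interfaces come from the patch.

#include <sys/kmem.h>
#include <sys/mac_soft_ring.h>	/* mac_srs_tx_t with st_rings/st_ring_count */

/* Keep a right-sized copy of the reserved TX ring handles on the SRS. */
static void
example_save_tx_rings(mac_srs_tx_t *tx, mac_ring_handle_t *rings, int reserved)
{
	int i;

	tx->st_ring_count = reserved;
	tx->st_rings = kmem_zalloc(sizeof (mac_ring_handle_t) * reserved,
	    KM_SLEEP);
	for (i = 0; i < reserved; i++)
		tx->st_rings[i] = rings[i];
}

/* Teardown frees with the same element count that setup recorded. */
static void
example_free_tx_rings(mac_srs_tx_t *tx)
{
	if (tx->st_ring_count != 0) {
		kmem_free(tx->st_rings,
		    sizeof (mac_ring_handle_t) * tx->st_ring_count);
		tx->st_rings = NULL;
		tx->st_ring_count = 0;
	}
}

Because mac_tx_srs_setup() now always assigns at least the default TX ring, st_ring_count is never zero once setup succeeds, which is what the new ASSERT(tx_rings_reserved != 0) in the mac_datapath_setup.c hunk is checking.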