summaryrefslogtreecommitdiff
path: root/usr/src
diff options
context:
space:
mode:
Diffstat (limited to 'usr/src')
-rw-r--r--usr/src/uts/common/io/hxge/hxge.h5
-rw-r--r--usr/src/uts/common/io/hxge/hxge_common.h62
-rw-r--r--usr/src/uts/common/io/hxge/hxge_fm.c5
-rw-r--r--usr/src/uts/common/io/hxge/hxge_main.c109
-rw-r--r--usr/src/uts/common/io/hxge/hxge_rxdma.c213
-rw-r--r--usr/src/uts/common/io/hxge/hxge_rxdma.h3
-rw-r--r--usr/src/uts/common/io/hxge/hxge_vmac.c39
7 files changed, 179 insertions, 257 deletions
diff --git a/usr/src/uts/common/io/hxge/hxge.h b/usr/src/uts/common/io/hxge/hxge.h
index d6b343de7c..b640405fca 100644
--- a/usr/src/uts/common/io/hxge/hxge.h
+++ b/usr/src/uts/common/io/hxge/hxge.h
@@ -401,11 +401,6 @@ struct _hxge_t {
kmutex_t vmac_lock;
kmutex_t pio_lock;
hxge_timeout timeout;
-
- int msix_count;
- int msix_index;
- uint32_t msix_table[32][3];
- uint32_t msix_table_check[1][3];
};
/*
diff --git a/usr/src/uts/common/io/hxge/hxge_common.h b/usr/src/uts/common/io/hxge/hxge_common.h
index 3a93f71660..818540bb51 100644
--- a/usr/src/uts/common/io/hxge/hxge_common.h
+++ b/usr/src/uts/common/io/hxge/hxge_common.h
@@ -18,8 +18,9 @@
*
* CDDL HEADER END
*/
+
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -50,37 +51,48 @@ extern "C" {
#endif
#define HXGE_RBR_RBB_MIN 128
-#define HXGE_RBR_RBB_MAX (64 * 128 -1)
+#define HXGE_RBR_RBB_MAX ((64 * 128) - 1)
+#if defined(__sparc)
+#define HXGE_RBR_RBB_DEFAULT 1536 /* Number of RBR Blocks */
+#else
#define HXGE_RBR_RBB_DEFAULT 2048 /* Number of RBR Blocks */
+#endif
#define HXGE_RCR_MIN (HXGE_RBR_RBB_MIN * 2)
#define HXGE_RCR_MAX 65504 /* 2^16 - 32 */
-/* 4096/256 for x86 and 8192/512 for Sparc */
+/*
+ * 4096/256 for x86 and 8192/256 for Sparc
+ * NOTE: RCR Ring Size should *not* enable bit 19 of the address.
+ */
+#if defined(__sparc)
+#define HXGE_RCR_DEFAULT (HXGE_RBR_RBB_DEFAULT * 32)
+#else
#define HXGE_RCR_DEFAULT (HXGE_RBR_RBB_DEFAULT * 16)
+#endif
#define HXGE_TX_RING_DEFAULT 2048
-#define HXGE_TX_RING_MAX (64 * 128 - 1)
-
-#define RBR_BKSIZE_4K 0
-#define RBR_BKSIZE_8K 1
-#define RBR_BKSIZE_4K_BYTES (4 * 1024)
-
-#define RBR_BUFSZ2_2K 0
-#define RBR_BUFSZ2_4K 1
-#define RBR_BUFSZ2_2K_BYTES (2 * 1024)
-#define RBR_BUFSZ2_4K_BYTES (4 * 1024)
-
-#define RBR_BUFSZ1_1K 0
-#define RBR_BUFSZ1_2K 1
-#define RBR_BUFSZ1_1K_BYTES 1024
-#define RBR_BUFSZ1_2K_BYTES (2 * 1024)
-
-#define RBR_BUFSZ0_256B 0
-#define RBR_BUFSZ0_512B 1
-#define RBR_BUFSZ0_1K 2
-#define RBR_BUFSZ0_256_BYTES 256
-#define RBR_BUFSZ0_512_BYTES 512
-#define RBR_BUFSZ0_1K_BYTES 1024
+#define HXGE_TX_RING_MAX ((64 * 128) - 1)
+
+#define RBR_BKSIZE_4K 0
+#define RBR_BKSIZE_8K 1
+#define RBR_BKSIZE_4K_BYTES (4 * 1024)
+
+#define RBR_BUFSZ2_2K 0
+#define RBR_BUFSZ2_4K 1
+#define RBR_BUFSZ2_2K_BYTES (2 * 1024)
+#define RBR_BUFSZ2_4K_BYTES (4 * 1024)
+
+#define RBR_BUFSZ1_1K 0
+#define RBR_BUFSZ1_2K 1
+#define RBR_BUFSZ1_1K_BYTES 1024
+#define RBR_BUFSZ1_2K_BYTES (2 * 1024)
+
+#define RBR_BUFSZ0_256B 0
+#define RBR_BUFSZ0_512B 1
+#define RBR_BUFSZ0_1K 2
+#define RBR_BUFSZ0_256_BYTES 256
+#define RBR_BUFSZ0_512_BYTES 512
+#define RBR_BUFSZ0_1K_BYTES 1024
/*
* VLAN table configuration
diff --git a/usr/src/uts/common/io/hxge/hxge_fm.c b/usr/src/uts/common/io/hxge/hxge_fm.c
index c7627f57a6..b74d928104 100644
--- a/usr/src/uts/common/io/hxge/hxge_fm.c
+++ b/usr/src/uts/common/io/hxge/hxge_fm.c
@@ -18,6 +18,7 @@
*
* CDDL HEADER END
*/
+
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
@@ -171,7 +172,7 @@ hxge_fm_ereport_attr_t hxge_fm_ereport_sw[] = {
void
hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr,
- ddi_dma_attr_t *dma_attr)
+ ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr)
{
ddi_iblock_cookie_t iblk;
@@ -216,8 +217,10 @@ hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr,
*/
if (DDI_FM_ACC_ERR_CAP(hxgep->fm_capabilities)) {
reg_attr->devacc_attr_access = DDI_FLAGERR_ACC;
+ desc_attr->devacc_attr_access = DDI_FLAGERR_ACC;
} else {
reg_attr->devacc_attr_access = DDI_DEFAULT_ACC;
+ desc_attr->devacc_attr_access = DDI_DEFAULT_ACC;
}
/*
diff --git a/usr/src/uts/common/io/hxge/hxge_main.c b/usr/src/uts/common/io/hxge/hxge_main.c
index 59948c3bf7..24d4bec784 100644
--- a/usr/src/uts/common/io/hxge/hxge_main.c
+++ b/usr/src/uts/common/io/hxge/hxge_main.c
@@ -69,9 +69,14 @@ uint32_t hxge_tx_lb_policy = HXGE_TX_LB_TCPUDP;
* hxge_rx_bcopy_size_type: receive buffer block size type.
* hxge_rx_threshold_lo: copy only up to tunable block size type.
*/
+#if defined(__sparc)
+hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_6;
+hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_4;
+#else
hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_NONE;
-hxge_rxbuf_type_t hxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_NONE;
+#endif
+hxge_rxbuf_type_t hxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
rtrace_t hpi_rtracebuf;
@@ -90,18 +95,18 @@ static void hxge_destroy_mutexes(p_hxge_t);
static hxge_status_t hxge_map_regs(p_hxge_t hxgep);
static void hxge_unmap_regs(p_hxge_t hxgep);
-hxge_status_t hxge_add_intrs(p_hxge_t hxgep);
+static hxge_status_t hxge_add_intrs(p_hxge_t hxgep);
static void hxge_remove_intrs(p_hxge_t hxgep);
static hxge_status_t hxge_add_intrs_adv(p_hxge_t hxgep);
static hxge_status_t hxge_add_intrs_adv_type(p_hxge_t, uint32_t);
static hxge_status_t hxge_add_intrs_adv_type_fix(p_hxge_t, uint32_t);
-void hxge_intrs_enable(p_hxge_t hxgep);
+static void hxge_intrs_enable(p_hxge_t hxgep);
static void hxge_intrs_disable(p_hxge_t hxgep);
static void hxge_suspend(p_hxge_t);
static hxge_status_t hxge_resume(p_hxge_t);
-hxge_status_t hxge_setup_dev(p_hxge_t);
+static hxge_status_t hxge_setup_dev(p_hxge_t);
static void hxge_destroy_dev(p_hxge_t);
-hxge_status_t hxge_alloc_mem_pool(p_hxge_t);
+static hxge_status_t hxge_alloc_mem_pool(p_hxge_t);
static void hxge_free_mem_pool(p_hxge_t);
static hxge_status_t hxge_alloc_rx_mem_pool(p_hxge_t);
static void hxge_free_rx_mem_pool(p_hxge_t);
@@ -151,8 +156,6 @@ static int hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name,
static void hxge_link_poll(void *arg);
static void hxge_link_update(p_hxge_t hxge, link_state_t state);
static void hxge_msix_init(p_hxge_t hxgep);
-static void hxge_store_msix_table(p_hxge_t hxgep);
-static void hxge_check_1entry_msix_table(p_hxge_t hxgep, int msix_index);
mac_priv_prop_t hxge_priv_props[] = {
{"_rxdma_intr_time", MAC_PROP_PERM_RW},
@@ -217,7 +220,7 @@ extern hxge_status_t hxge_ldgv_init();
extern hxge_status_t hxge_ldgv_uninit();
extern hxge_status_t hxge_intr_ldgv_init();
extern void hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr,
- ddi_dma_attr_t *dma_attr);
+ ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr);
extern void hxge_fm_fini(p_hxge_t hxgep);
/*
@@ -230,10 +233,9 @@ uint32_t hxge_mblks_pending = 0;
* Device register access attributes for PIO.
*/
static ddi_device_acc_attr_t hxge_dev_reg_acc_attr = {
- DDI_DEVICE_ATTR_V1,
+ DDI_DEVICE_ATTR_V0,
DDI_STRUCTURE_LE_ACC,
DDI_STRICTORDER_ACC,
- DDI_DEFAULT_ACC
};
/*
@@ -469,7 +471,8 @@ hxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
hxgep->mmac.addrs[i].primary = B_FALSE;
}
- hxge_fm_init(hxgep, &hxge_dev_reg_acc_attr, &hxge_rx_dma_attr);
+ hxge_fm_init(hxgep, &hxge_dev_reg_acc_attr, &hxge_dev_desc_dma_acc_attr,
+ &hxge_rx_dma_attr);
status = hxge_map_regs(hxgep);
if (status != HXGE_OK) {
@@ -544,9 +547,6 @@ hxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
*/
hxge_intrs_enable(hxgep);
- /* Keep copy of MSIx table written */
- hxge_store_msix_table(hxgep);
-
if ((status = hxge_mac_register(hxgep)) != HXGE_OK) {
HXGE_DEBUG_MSG((hxgep, DDI_CTL,
"unable to register to mac layer (%d)", status));
@@ -686,6 +686,9 @@ hxge_unattach(p_hxge_t hxgep)
hxgep->hxge_timerid = 0;
}
+ /* Stop interrupts. */
+ hxge_intrs_disable(hxgep);
+
/* Stop any further interrupts. */
hxge_remove_intrs(hxgep);
@@ -1012,11 +1015,6 @@ hxge_init(p_hxge_t hxgep)
goto hxge_init_fail5;
}
- hxge_intrs_enable(hxgep);
-
- /* Keep copy of MSIx table written */
- hxge_store_msix_table(hxgep);
-
/*
* Enable hardware interrupts.
*/
@@ -1236,9 +1234,6 @@ hxge_resume(p_hxge_t hxgep)
hxge_intrs_enable(hxgep);
- /* Keep copy of MSIx table written */
- hxge_store_msix_table(hxgep);
-
hxgep->suspended = 0;
/*
@@ -1256,7 +1251,7 @@ hxge_resume(p_hxge_t hxgep)
return (status);
}
-hxge_status_t
+static hxge_status_t
hxge_setup_dev(p_hxge_t hxgep)
{
hxge_status_t status = HXGE_OK;
@@ -1391,7 +1386,7 @@ hxge_get_soft_properties_exit:
return (status);
}
-hxge_status_t
+static hxge_status_t
hxge_alloc_mem_pool(p_hxge_t hxgep)
{
hxge_status_t status = HXGE_OK;
@@ -3590,7 +3585,7 @@ _info(struct modinfo *modinfop)
}
/*ARGSUSED*/
-hxge_status_t
+static hxge_status_t
hxge_add_intrs(p_hxge_t hxgep)
{
int intr_types;
@@ -4129,7 +4124,7 @@ hxge_remove_intrs(p_hxge_t hxgep)
}
/*ARGSUSED*/
-void
+static void
hxge_intrs_enable(p_hxge_t hxgep)
{
p_hxge_intr_t intrp;
@@ -4406,14 +4401,6 @@ hxge_link_poll(void *arg)
}
}
- if (hxgep->msix_count++ >= HXGE_MSIX_PARITY_CHECK_COUNT) {
- hxgep->msix_count = 0;
- hxgep->msix_index++;
- if (hxgep->msix_index >= HXGE_MSIX_ENTRIES)
- hxgep->msix_index = 0;
- hxge_check_1entry_msix_table(hxgep, hxgep->msix_index);
- }
-
/* Restart the link status timer to check the link status */
MUTEX_ENTER(&to->lock);
to->id = timeout(hxge_link_poll, arg, to->ticks);
@@ -4458,6 +4445,7 @@ hxge_msix_init(p_hxge_t hxgep)
HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16, data0);
HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 4, data1);
HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 8, data2);
+ HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 12, 0);
}
/* Initialize ram data out buffer. */
@@ -4469,59 +4457,6 @@ hxge_msix_init(p_hxge_t hxgep)
}
}
-static void
-hxge_store_msix_table(p_hxge_t hxgep)
-{
- int i;
- uint32_t msix_entry0;
- uint32_t msix_entry1;
- uint32_t msix_entry2;
-
- for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
- HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16, &msix_entry0);
- HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 4,
- &msix_entry1);
- HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 8,
- &msix_entry2);
-
- hxgep->msix_table[i][0] = msix_entry0;
- hxgep->msix_table[i][1] = msix_entry1;
- hxgep->msix_table[i][2] = msix_entry2;
- }
-}
-
-static void
-hxge_check_1entry_msix_table(p_hxge_t hxgep, int i)
-{
- uint32_t msix_entry0;
- uint32_t msix_entry1;
- uint32_t msix_entry2;
- p_hxge_peu_sys_stats_t statsp;
-
- statsp = (p_hxge_peu_sys_stats_t)&hxgep->statsp->peu_sys_stats;
-
- HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16, &msix_entry0);
- HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 4, &msix_entry1);
- HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 8, &msix_entry2);
-
- hxgep->msix_table_check[i][0] = msix_entry0;
- hxgep->msix_table_check[i][1] = msix_entry1;
- hxgep->msix_table_check[i][2] = msix_entry2;
-
- if ((hxgep->msix_table[i][0] != hxgep->msix_table_check[i][0]) ||
- (hxgep->msix_table[i][1] != hxgep->msix_table_check[i][1]) ||
- (hxgep->msix_table[i][2] != hxgep->msix_table_check[i][2])) {
- statsp->eic_msix_parerr++;
- if (statsp->eic_msix_parerr == 1) {
- HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
- "==> hxge_check_1entry_msix_table: "
- "eic_msix_parerr at index: %d", i));
- HXGE_FM_REPORT_ERROR(hxgep, NULL,
- HXGE_FM_EREPORT_PEU_ERR);
- }
- }
-}
-
/*
* The following function is to support
* PSARC/2007/453 MSI-X interrupt limit override.
diff --git a/usr/src/uts/common/io/hxge/hxge_rxdma.c b/usr/src/uts/common/io/hxge/hxge_rxdma.c
index 7aa5a51049..6700313f63 100644
--- a/usr/src/uts/common/io/hxge/hxge_rxdma.c
+++ b/usr/src/uts/common/io/hxge/hxge_rxdma.c
@@ -656,6 +656,7 @@ hxge_rxbuf_index_info_init(p_hxge_t hxgep, p_rx_rbr_ring_t rbrp)
ring_info->hint[0] = NO_HINT;
ring_info->hint[1] = NO_HINT;
ring_info->hint[2] = NO_HINT;
+ ring_info->hint[3] = NO_HINT;
max_index = rbrp->num_blocks;
/* read the DVMA address information and sort it */
@@ -837,53 +838,6 @@ hxge_rxdma_hw_mode(p_hxge_t hxgep, boolean_t enable)
return (status);
}
-int
-hxge_rxdma_get_ring_index(p_hxge_t hxgep, uint16_t channel)
-{
- int i, ndmas;
- uint16_t rdc;
- p_rx_rbr_rings_t rx_rbr_rings;
- p_rx_rbr_ring_t *rbr_rings;
-
- HXGE_DEBUG_MSG((hxgep, RX_CTL,
- "==> hxge_rxdma_get_ring_index: channel %d", channel));
-
- rx_rbr_rings = hxgep->rx_rbr_rings;
- if (rx_rbr_rings == NULL) {
- HXGE_DEBUG_MSG((hxgep, RX_CTL,
- "<== hxge_rxdma_get_ring_index: NULL ring pointer"));
- return (-1);
- }
-
- ndmas = rx_rbr_rings->ndmas;
- if (!ndmas) {
- HXGE_DEBUG_MSG((hxgep, RX_CTL,
- "<== hxge_rxdma_get_ring_index: no channel"));
- return (-1);
- }
-
- HXGE_DEBUG_MSG((hxgep, RX_CTL,
- "==> hxge_rxdma_get_ring_index (ndmas %d)", ndmas));
-
- rbr_rings = rx_rbr_rings->rbr_rings;
- for (i = 0; i < ndmas; i++) {
- rdc = rbr_rings[i]->rdc;
- if (channel == rdc) {
- HXGE_DEBUG_MSG((hxgep, RX_CTL,
- "==> hxge_rxdma_get_rbr_ring: "
- "channel %d (index %d) "
- "ring %d", channel, i, rbr_rings[i]));
-
- return (i);
- }
- }
-
- HXGE_DEBUG_MSG((hxgep, RX_CTL,
- "<== hxge_rxdma_get_rbr_ring_index: not found"));
-
- return (-1);
-}
-
/*
* Static functions start here.
*/
@@ -1034,9 +988,8 @@ hxge_post_page(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
* DMA channel, if rbr empty was signaled.
*/
hpi_rxdma_rdc_rbr_kick(HXGE_DEV_HPI_HANDLE(hxgep), rx_rbr_p->rdc, 1);
- if (rx_rbr_p->rbr_is_empty &&
- (rx_rbr_p->rbb_max - rx_rbr_p->rbr_used) >=
- HXGE_RBR_EMPTY_THRESHOLD) {
+ if (rx_rbr_p->rbr_is_empty && (rx_rbr_p->rbb_max -
+ rx_rbr_p->rbr_used) >= HXGE_RBR_EMPTY_THRESHOLD) {
hxge_rbr_empty_restore(hxgep, rx_rbr_p);
}
@@ -1141,6 +1094,7 @@ hxge_rx_intr(caddr_t arg1, caddr_t arg2)
hpi_handle_t handle;
rdc_stat_t cs;
p_rx_rcr_ring_t ring;
+ p_rx_rbr_ring_t rbrp;
mblk_t *mp = NULL;
if (ldvp == NULL) {
@@ -1170,8 +1124,12 @@ hxge_rx_intr(caddr_t arg1, caddr_t arg2)
ldgp = ldvp->ldgp;
ASSERT(ring != NULL);
- ASSERT(ring->ldgp == ldgp);
- ASSERT(ring->ldvp == ldvp);
+#if defined(DEBUG)
+ if (rhp->started) {
+ ASSERT(ring->ldgp == ldgp);
+ ASSERT(ring->ldvp == ldvp);
+ }
+#endif
MUTEX_ENTER(&ring->lock);
@@ -1205,11 +1163,16 @@ hxge_rx_intr(caddr_t arg1, caddr_t arg2)
* saves us one pio read. Also write 1 to rcrthres and
* rcrto to clear these two edge triggered bits.
*/
- cs.value &= RDC_STAT_WR1C;
- cs.bits.mex = 1;
- cs.bits.ptrread = 0;
- cs.bits.pktread = 0;
- RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value);
+ rbrp = hxgep->rx_rbr_rings->rbr_rings[channel];
+ MUTEX_ENTER(&rbrp->post_lock);
+ if (!rbrp->rbr_is_empty) {
+ cs.value = 0;
+ cs.bits.mex = 1;
+ cs.bits.ptrread = 0;
+ cs.bits.pktread = 0;
+ RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value);
+ }
+ MUTEX_EXIT(&rbrp->post_lock);
if (ldgp->nldvs == 1) {
/*
@@ -1423,15 +1386,6 @@ hxge_rx_poll(void *arg, int bytes_to_pickup)
MUTEX_ENTER(&ring->lock);
}
- /*
- * Clear any control and status bits and update
- * the hardware.
- */
- cs.value &= RDC_STAT_WR1C;
- cs.bits.ptrread = 0;
- cs.bits.pktread = 0;
- RXDMA_REG_WRITE64(handle, RDC_STAT, rhp->index, cs.value);
-
MUTEX_EXIT(&ring->lock);
return (mblk);
}
@@ -1448,9 +1402,10 @@ hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp,
p_rcr_entry_t rcr_desc_rd_head_pp;
p_mblk_t nmp, mp_cont, head_mp, *tail_mp;
uint16_t qlen, nrcr_read, npkt_read;
- uint32_t qlen_hw, qlen_sw, num_rcrs;
+ uint32_t qlen_hw, npkts, num_rcrs;
uint32_t invalid_rcr_entry;
boolean_t multi;
+ rdc_stat_t pktcs;
rdc_rcr_cfg_b_t rcr_cfg_b;
uint64_t rcr_head_index, rcr_tail_index;
uint64_t rcr_tail;
@@ -1516,15 +1471,15 @@ hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp,
num_rcrs = (rcrp->comp_size - rcr_head_index) + rcr_tail_index;
}
- qlen_sw = hxge_scan_for_last_eop(rcrp, rcr_desc_rd_head_p, num_rcrs);
- if (!qlen_sw)
+ npkts = hxge_scan_for_last_eop(rcrp, rcr_desc_rd_head_p, num_rcrs);
+ if (!npkts)
return (NULL);
- if (qlen_hw > qlen_sw) {
+ if (qlen_hw > npkts) {
HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
"Channel %d, rcr_qlen from reg %d and from rcr_tail %d\n",
channel, qlen_hw, qlen_sw));
- qlen_hw = qlen_sw;
+ qlen_hw = npkts;
}
while (qlen_hw) {
@@ -1627,14 +1582,14 @@ hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp,
channel, rcr_cfg_b.value);
}
+ pktcs.value = 0;
if (hxgep->rdc_first_intr[channel] && (npkt_read > 0)) {
hxgep->rdc_first_intr[channel] = B_FALSE;
- cs.bits.pktread = npkt_read - 1;
+ pktcs.bits.pktread = npkt_read - 1;
} else
- cs.bits.pktread = npkt_read;
- cs.bits.ptrread = nrcr_read;
- cs.value &= 0xffffffffULL;
- RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value);
+ pktcs.bits.pktread = npkt_read;
+ pktcs.bits.ptrread = nrcr_read;
+ RXDMA_REG_WRITE64(handle, RDC_STAT, channel, pktcs.value);
HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
"==> hxge_rx_pkts: EXIT: rcr channel %d "
@@ -1642,7 +1597,6 @@ hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp,
channel, rcrp->rcr_desc_rd_head_pp, rcrp->comp_rd_index));
HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "<== hxge_rx_pkts"));
-
return (head_mp);
}
@@ -1657,7 +1611,7 @@ static uint32_t hxge_scan_for_last_eop(p_rx_rcr_ring_t rcrp,
uint32_t rcrs = 0;
uint32_t pkts = 0;
- while (rcrs++ < num_rcrs) {
+ while (rcrs < num_rcrs) {
rcr_entry = *((uint64_t *)rcr_desc_rd_head_p);
if ((rcr_entry == 0x0) || (rcr_entry == RCR_ENTRY_PATTERN))
@@ -1668,6 +1622,8 @@ static uint32_t hxge_scan_for_last_eop(p_rx_rcr_ring_t rcrp,
rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
rcrp->rcr_desc_first_p, rcrp->rcr_desc_last_p);
+
+ rcrs++;
}
return (pkts);
@@ -2165,19 +2121,12 @@ hxge_rx_err_evnts(p_hxge_t hxgep, uint_t index, p_hxge_ldv_t ldvp,
boolean_t rxchan_fatal = B_FALSE;
uint8_t channel;
hxge_status_t status = HXGE_OK;
- uint64_t cs_val;
HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_rx_err_evnts"));
handle = HXGE_DEV_HPI_HANDLE(hxgep);
channel = ldvp->channel;
- /* Clear the interrupts */
- cs.bits.pktread = 0;
- cs.bits.ptrread = 0;
- cs_val = cs.value & RDC_STAT_WR1C;
- RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs_val);
-
rdc_stats = &hxgep->statsp->rdc_stats[ldvp->vdma_index];
if (cs.bits.rbr_cpl_to) {
@@ -2381,6 +2330,11 @@ hxge_map_rxdma(p_hxge_t hxgep)
* Map descriptors from the buffer pools for each dma channel.
*/
for (i = 0; i < ndmas; i++) {
+ if (((p_hxge_dma_common_t)dma_buf_p[i]) == NULL) {
+ status = HXGE_ERROR;
+ goto hxge_map_rxdma_fail1;
+ }
+
/*
* Set up and prepare buffer blocks, descriptors and mailbox.
*/
@@ -2964,38 +2918,29 @@ hxge_map_rxdma_channel_buf_ring(p_hxge_t hxgep, uint16_t channel,
rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1);
/*
- * Buffer sizes suggested by NIU architect. 256, 512 and 2K.
+ * Buffer sizes: 256, 1K, and 2K.
+ *
+ * Blk 0 size.
*/
+ rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B;
+ rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES;
+ rbrp->hpi_pkt_buf_size0 = SIZE_256B;
- switch (hxgep->rx_bksize_code) {
- case RBR_BKSIZE_4K:
- rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B;
- rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES;
- rbrp->hpi_pkt_buf_size0 = SIZE_256B;
- break;
- case RBR_BKSIZE_8K:
- /* Use 512 to avoid possible rcr_full condition */
- rbrp->pkt_buf_size0 = RBR_BUFSZ0_512B;
- rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_512_BYTES;
- rbrp->hpi_pkt_buf_size0 = SIZE_512B;
- break;
- }
-
+ /*
+ * Blk 1 size.
+ */
rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K;
rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES;
rbrp->hpi_pkt_buf_size1 = SIZE_1KB;
- rbrp->block_size = hxgep->rx_default_block_size;
+ /*
+ * Blk 2 size.
+ */
+ rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K;
+ rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES;
+ rbrp->hpi_pkt_buf_size2 = SIZE_2KB;
- if (!hxgep->param_arr[param_accept_jumbo].value) {
- rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K;
- rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES;
- rbrp->hpi_pkt_buf_size2 = SIZE_2KB;
- } else {
- rbrp->hpi_pkt_buf_size2 = SIZE_4KB;
- rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K;
- rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES;
- }
+ rbrp->block_size = hxgep->rx_default_block_size;
HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
"==> hxge_map_rxdma_channel_buf_ring: channel %d "
@@ -3576,9 +3521,6 @@ hxge_rxdma_handle_sys_errors(p_hxge_t hxgep)
handle = hxgep->hpi_handle;
statsp = (p_hxge_rdc_sys_stats_t)&hxgep->statsp->rdc_sys_stats;
- /* Clear the int_dbg register in case it is an injected err */
- HXGE_REG_WR64(handle, RDC_FIFO_ERR_INT_DBG, 0x0);
-
/* Get the error status and clear the register */
HXGE_REG_RD64(handle, RDC_FIFO_ERR_STAT, &stat.value);
HXGE_REG_WR64(handle, RDC_FIFO_ERR_STAT, stat.value);
@@ -3641,7 +3583,6 @@ hxge_rxdma_fatal_err_recover(p_hxge_t hxgep, uint16_t channel)
p_rx_mbox_t mboxp;
rdc_int_mask_t ent_mask;
p_hxge_dma_common_t dmap;
- int ring_idx;
p_rx_msg_t rx_msg_p;
int i;
uint32_t hxge_port_rcr_size;
@@ -3659,13 +3600,8 @@ hxge_rxdma_fatal_err_recover(p_hxge_t hxgep, uint16_t channel)
HXGE_DEBUG_MSG((hxgep, RX_CTL, "Rx DMA stop..."));
- ring_idx = hxge_rxdma_get_ring_index(hxgep, channel);
- if (ring_idx < 0) {
- return (HXGE_ERROR);
- }
-
- rbrp = (p_rx_rbr_ring_t)hxgep->rx_rbr_rings->rbr_rings[ring_idx];
- rcrp = (p_rx_rcr_ring_t)hxgep->rx_rcr_rings->rcr_rings[ring_idx];
+ rbrp = (p_rx_rbr_ring_t)hxgep->rx_rbr_rings->rbr_rings[channel];
+ rcrp = (p_rx_rcr_ring_t)hxgep->rx_rcr_rings->rcr_rings[channel];
MUTEX_ENTER(&rcrp->lock);
MUTEX_ENTER(&rbrp->lock);
@@ -3697,7 +3633,7 @@ hxge_rxdma_fatal_err_recover(p_hxge_t hxgep, uint16_t channel)
goto fail;
}
hxge_port_rcr_size = hxgep->hxge_port_rcr_size;
- mboxp = (p_rx_mbox_t)hxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx];
+ mboxp = (p_rx_mbox_t)hxgep->rx_mbox_areas_p->rxmbox_areas[channel];
rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
rbrp->rbr_rd_index = 0;
@@ -3796,11 +3732,6 @@ hxge_rx_port_fatal_err_recover(p_hxge_t hxgep)
HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_port_fatal_err_recover"));
HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovering from RDC error ..."));
- /* Reset RDC block from PEU for this fatal error */
- reset_reg.value = 0;
- reset_reg.bits.rdc_rst = 1;
- HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value);
-
/* Disable RxMAC */
HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxMAC...\n"));
MUTEX_ENTER(&hxgep->vmac_lock);
@@ -3809,6 +3740,15 @@ hxge_rx_port_fatal_err_recover(p_hxge_t hxgep)
HXGE_DELAY(1000);
+ /*
+ * Reset RDC block from PEU for this fatal error
+ */
+ reset_reg.value = 0;
+ reset_reg.bits.rdc_rst = 1;
+ HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value);
+
+ HXGE_DELAY(1000);
+
/* Restore any common settings after PEU reset */
if (hxge_rxdma_hw_start_common(hxgep) != HXGE_OK)
goto fail;
@@ -3824,7 +3764,10 @@ hxge_rx_port_fatal_err_recover(p_hxge_t hxgep)
rbrp = rcrp->rx_rbr_p;
MUTEX_ENTER(&rbrp->post_lock);
- /* This function needs to be inside the post_lock */
+
+ /*
+ * This function needs to be inside the post_lock
+ */
if (hxge_rxdma_fatal_err_recover(hxgep, channel) != HXGE_OK) {
HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
"Could not recover channel %d", channel));
@@ -3879,11 +3822,10 @@ hxge_rbr_empty_restore(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p)
{
hpi_status_t hpi_status;
hxge_status_t status;
+ rdc_stat_t cs;
p_hxge_rx_ring_stats_t rdc_stats;
rdc_stats = &hxgep->statsp->rdc_stats[rx_rbr_p->rdc];
- rdc_stats->rbr_empty_restore++;
- rx_rbr_p->rbr_is_empty = B_FALSE;
/*
* Complete the processing for the RBR Empty by:
@@ -3902,6 +3844,14 @@ hxge_rbr_empty_restore(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p)
MUTEX_ENTER(&hxgep->vmac_lock);
(void) hxge_rx_vmac_disable(hxgep);
+ /*
+ * Re-arm the mex bit for interrupts to be enabled.
+ */
+ cs.value = 0;
+ cs.bits.mex = 1;
+ RXDMA_REG_WRITE64(HXGE_DEV_HPI_HANDLE(hxgep), RDC_STAT,
+ rx_rbr_p->rdc, cs.value);
+
hpi_status = hpi_rxdma_cfg_rdc_enable(
HXGE_DEV_HPI_HANDLE(hxgep), rx_rbr_p->rdc);
if (hpi_status != HPI_SUCCESS) {
@@ -3921,4 +3871,7 @@ hxge_rbr_empty_restore(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p)
*/
(void) hxge_rx_vmac_enable(hxgep);
MUTEX_EXIT(&hxgep->vmac_lock);
+
+ rdc_stats->rbr_empty_restore++;
+ rx_rbr_p->rbr_is_empty = B_FALSE;
}
diff --git a/usr/src/uts/common/io/hxge/hxge_rxdma.h b/usr/src/uts/common/io/hxge/hxge_rxdma.h
index 0a2a0d5cc1..fe51dd1c4a 100644
--- a/usr/src/uts/common/io/hxge/hxge_rxdma.h
+++ b/usr/src/uts/common/io/hxge/hxge_rxdma.h
@@ -60,6 +60,7 @@ extern "C" {
#define RCR_PKTBUFSZ_1 0x01
#define RCR_PKTBUFSZ_2 0x02
#define RCR_SINGLE_BLOCK 0x03
+#define N_PKTSIZE_TYPES 0x04
#define RCR_NO_ERROR 0x0
#define RCR_CTRL_FIFO_DED 0x1
@@ -362,7 +363,7 @@ typedef struct _rxbuf_index_info_t {
/* Buffer index information */
typedef struct _rxring_info_t {
- uint32_t hint[3];
+ uint32_t hint[N_PKTSIZE_TYPES];
uint32_t block_size_mask;
uint16_t max_iterations;
rxbuf_index_info_t buffer[HXGE_DMA_BLOCK];
diff --git a/usr/src/uts/common/io/hxge/hxge_vmac.c b/usr/src/uts/common/io/hxge/hxge_vmac.c
index d1e19b377f..c70815d272 100644
--- a/usr/src/uts/common/io/hxge/hxge_vmac.c
+++ b/usr/src/uts/common/io/hxge/hxge_vmac.c
@@ -18,8 +18,9 @@
*
* CDDL HEADER END
*/
+
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -148,8 +149,8 @@ hxge_rx_vmac_init(p_hxge_t hxgep)
if (hxgep->statsp->port_stats.lb_mode != hxge_lb_normal)
xconfig |= CFG_VMAC_RX_LOOP_BACK;
- if (hpi_vmac_rx_config(handle, INIT, xconfig, max_frame_length)
- != HPI_SUCCESS)
+ if (hpi_vmac_rx_config(handle, INIT, xconfig,
+ max_frame_length) != HPI_SUCCESS)
return (HXGE_ERROR);
hxgep->vmac.rx_config = xconfig;
@@ -221,16 +222,26 @@ hxge_rx_vmac_enable(p_hxge_t hxgep)
* vmac. Max framesize is programed here in
* hxge_rx_vmac_init().
*/
- rv = hxge_rx_vmac_init(hxgep);
- if (rv != HXGE_OK)
- return (rv);
+ rv = hpi_vmac_rx_set_framesize(HXGE_DEV_HPI_HANDLE(hxgep),
+ (uint16_t)hxgep->vmac.maxframesize);
+ if (rv != HPI_SUCCESS) {
+ HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_rx_vmac_enable"));
+ return (HXGE_ERROR);
+ }
+
+ /*
+ * Wait for a period of time.
+ */
+ HXGE_DELAY(10);
+ /*
+ * Enable the vmac.
+ */
rv = hpi_vmac_rx_config(handle, ENABLE, CFG_VMAC_RX_EN, 0);
status = (rv == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR;
HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_rx_vmac_enable"));
-
return (status);
}
@@ -254,12 +265,16 @@ hxge_rx_vmac_disable(p_hxge_t hxgep)
(void) hpi_vmac_rx_set_framesize(HXGE_DEV_HPI_HANDLE(hxgep),
(uint16_t)0);
+ /*
+ * Wait for 10us before doing disable.
+ */
+ HXGE_DELAY(10);
+
rv = hpi_vmac_rx_config(handle, DISABLE, CFG_VMAC_RX_EN, 0);
status = (rv == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR;
HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_rx_vmac_disable"));
-
return (status);
}
@@ -282,6 +297,14 @@ hxge_rx_vmac_reset(p_hxge_t hxgep)
{
hpi_handle_t handle = hxgep->hpi_handle;
+ (void) hpi_vmac_rx_set_framesize(HXGE_DEV_HPI_HANDLE(hxgep),
+ (uint16_t)0);
+
+ /*
+ * Wait for 10us before doing reset.
+ */
+ HXGE_DELAY(10);
+
(void) hpi_rx_vmac_reset(handle);
return (HXGE_OK);