summaryrefslogtreecommitdiff
path: root/usr/src
diff options
context:
space:
mode:
Diffstat (limited to 'usr/src')
-rw-r--r--usr/src/uts/common/Makefile.files2
-rw-r--r--usr/src/uts/common/io/igb/igb_buf.c334
-rw-r--r--usr/src/uts/common/io/igb/igb_gld.c646
-rw-r--r--usr/src/uts/common/io/igb/igb_main.c507
-rw-r--r--usr/src/uts/common/io/igb/igb_ndd.c374
-rw-r--r--usr/src/uts/common/io/igb/igb_rx.c130
-rw-r--r--usr/src/uts/common/io/igb/igb_sw.h229
-rw-r--r--usr/src/uts/common/io/igb/igb_tx.c141
-rw-r--r--usr/src/uts/intel/igb/Makefile8
-rw-r--r--usr/src/uts/sparc/igb/Makefile8
10 files changed, 1391 insertions, 988 deletions
diff --git a/usr/src/uts/common/Makefile.files b/usr/src/uts/common/Makefile.files
index d9bbf0a23a..59914b019a 100644
--- a/usr/src/uts/common/Makefile.files
+++ b/usr/src/uts/common/Makefile.files
@@ -1865,7 +1865,7 @@ E1000G_OBJS += e1000_80003es2lan.o e1000_82540.o e1000_82541.o e1000_82542.o \
IGB_OBJS = igb_82575.o igb_api.o igb_mac.o igb_manage.o \
igb_nvm.o igb_osdep.o igb_phy.o igb_buf.o \
igb_debug.o igb_gld.o igb_log.o igb_main.o \
- igb_ndd.o igb_rx.o igb_stat.o igb_tx.o
+ igb_rx.o igb_stat.o igb_tx.o
#
# Intel 10GbE PCIE NIC driver module
diff --git a/usr/src/uts/common/io/igb/igb_buf.c b/usr/src/uts/common/io/igb/igb_buf.c
index ed798bb39b..1571a1cc61 100644
--- a/usr/src/uts/common/io/igb/igb_buf.c
+++ b/usr/src/uts/common/io/igb/igb_buf.c
@@ -22,7 +22,7 @@
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -30,14 +30,13 @@
static int igb_alloc_tbd_ring(igb_tx_ring_t *);
static void igb_free_tbd_ring(igb_tx_ring_t *);
-static int igb_alloc_rbd_ring(igb_rx_ring_t *);
-static void igb_free_rbd_ring(igb_rx_ring_t *);
+static int igb_alloc_rbd_ring(igb_rx_data_t *);
+static void igb_free_rbd_ring(igb_rx_data_t *);
static int igb_alloc_dma_buffer(igb_t *, dma_buffer_t *, size_t);
-static void igb_free_dma_buffer(dma_buffer_t *);
static int igb_alloc_tcb_lists(igb_tx_ring_t *);
static void igb_free_tcb_lists(igb_tx_ring_t *);
-static int igb_alloc_rcb_lists(igb_rx_ring_t *);
-static void igb_free_rcb_lists(igb_rx_ring_t *);
+static int igb_alloc_rcb_lists(igb_rx_data_t *);
+static void igb_free_rcb_lists(igb_rx_data_t *);
#ifdef __sparc
#define IGB_DMA_ALIGNMENT 0x0000000000002000ull
@@ -125,6 +124,7 @@ int
igb_alloc_dma(igb_t *igb)
{
igb_rx_ring_t *rx_ring;
+ igb_rx_data_t *rx_data;
igb_tx_ring_t *tx_ring;
int i;
@@ -133,11 +133,12 @@ igb_alloc_dma(igb_t *igb)
 * Allocate receive descriptor ring and control block lists
*/
rx_ring = &igb->rx_rings[i];
+ rx_data = rx_ring->rx_data;
- if (igb_alloc_rbd_ring(rx_ring) != IGB_SUCCESS)
+ if (igb_alloc_rbd_ring(rx_data) != IGB_SUCCESS)
goto alloc_dma_failure;
- if (igb_alloc_rcb_lists(rx_ring) != IGB_SUCCESS)
+ if (igb_alloc_rcb_lists(rx_data) != IGB_SUCCESS)
goto alloc_dma_failure;
}
@@ -170,6 +171,7 @@ void
igb_free_dma(igb_t *igb)
{
igb_rx_ring_t *rx_ring;
+ igb_rx_data_t *rx_data;
igb_tx_ring_t *tx_ring;
int i;
@@ -178,8 +180,10 @@ igb_free_dma(igb_t *igb)
*/
for (i = 0; i < igb->num_rx_rings; i++) {
rx_ring = &igb->rx_rings[i];
- igb_free_rbd_ring(rx_ring);
- igb_free_rcb_lists(rx_ring);
+ rx_data = rx_ring->rx_data;
+
+ igb_free_rbd_ring(rx_data);
+ igb_free_rcb_lists(rx_data);
}
/*
@@ -321,11 +325,119 @@ igb_free_tbd_ring(igb_tx_ring_t *tx_ring)
tx_ring->tbd_ring = NULL;
}
+int
+igb_alloc_rx_ring_data(igb_rx_ring_t *rx_ring)
+{
+ igb_rx_data_t *rx_data;
+ igb_t *igb = rx_ring->igb;
+ uint32_t rcb_count;
+
+ /*
+ * Allocate memory for software receive rings
+ */
+ rx_data = kmem_zalloc(sizeof (igb_rx_data_t), KM_NOSLEEP);
+
+ if (rx_data == NULL) {
+ igb_error(igb, "Allocate software receive rings failed");
+ return (IGB_FAILURE);
+ }
+
+ rx_data->rx_ring = rx_ring;
+ mutex_init(&rx_data->recycle_lock, NULL,
+ MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));
+
+ rx_data->ring_size = igb->rx_ring_size;
+ rx_data->free_list_size = igb->rx_ring_size;
+
+ rx_data->rcb_head = 0;
+ rx_data->rcb_tail = 0;
+ rx_data->rcb_free = rx_data->free_list_size;
+
+ /*
+ * Allocate memory for the work list.
+ */
+ rx_data->work_list = kmem_zalloc(sizeof (rx_control_block_t *) *
+ rx_data->ring_size, KM_NOSLEEP);
+
+ if (rx_data->work_list == NULL) {
+ igb_error(igb,
+ "Could not allocate memory for rx work list");
+ goto alloc_rx_data_failure;
+ }
+
+ /*
+ * Allocate memory for the free list.
+ */
+ rx_data->free_list = kmem_zalloc(sizeof (rx_control_block_t *) *
+ rx_data->free_list_size, KM_NOSLEEP);
+
+ if (rx_data->free_list == NULL) {
+ igb_error(igb,
+ "Cound not allocate memory for rx free list");
+ goto alloc_rx_data_failure;
+ }
+
+ /*
+ * Allocate memory for the rx control blocks for work list and
+ * free list.
+ */
+ rcb_count = rx_data->ring_size + rx_data->free_list_size;
+ rx_data->rcb_area =
+ kmem_zalloc(sizeof (rx_control_block_t) * rcb_count,
+ KM_NOSLEEP);
+
+ if (rx_data->rcb_area == NULL) {
+ igb_error(igb,
+ "Cound not allocate memory for rx control blocks");
+ goto alloc_rx_data_failure;
+ }
+
+ rx_ring->rx_data = rx_data;
+ return (IGB_SUCCESS);
+
+alloc_rx_data_failure:
+ igb_free_rx_ring_data(rx_data);
+ return (IGB_FAILURE);
+}
+
+void
+igb_free_rx_ring_data(igb_rx_data_t *rx_data)
+{
+ uint32_t rcb_count;
+
+ if (rx_data == NULL)
+ return;
+
+ ASSERT(rx_data->rcb_pending == 0);
+
+ rcb_count = rx_data->ring_size + rx_data->free_list_size;
+ if (rx_data->rcb_area != NULL) {
+ kmem_free(rx_data->rcb_area,
+ sizeof (rx_control_block_t) * rcb_count);
+ rx_data->rcb_area = NULL;
+ }
+
+ if (rx_data->work_list != NULL) {
+ kmem_free(rx_data->work_list,
+ sizeof (rx_control_block_t *) * rx_data->ring_size);
+ rx_data->work_list = NULL;
+ }
+
+ if (rx_data->free_list != NULL) {
+ kmem_free(rx_data->free_list,
+ sizeof (rx_control_block_t *) * rx_data->free_list_size);
+ rx_data->free_list = NULL;
+ }
+
+ mutex_destroy(&rx_data->recycle_lock);
+ kmem_free(rx_data, sizeof (igb_rx_data_t));
+}
+
/*
* igb_alloc_rbd_ring - Memory allocation for the rx descriptors of one ring.
*/
static int
-igb_alloc_rbd_ring(igb_rx_ring_t *rx_ring)
+igb_alloc_rbd_ring(igb_rx_data_t *rx_data)
{
int ret;
size_t size;
@@ -333,10 +445,10 @@ igb_alloc_rbd_ring(igb_rx_ring_t *rx_ring)
uint_t cookie_num;
dev_info_t *devinfo;
ddi_dma_cookie_t cookie;
- igb_t *igb = rx_ring->igb;
+ igb_t *igb = rx_data->rx_ring->igb;
devinfo = igb->dip;
- size = sizeof (union e1000_adv_rx_desc) * rx_ring->ring_size;
+ size = sizeof (union e1000_adv_rx_desc) * rx_data->ring_size;
/*
* Allocate a new DMA handle for the receive descriptor
@@ -344,12 +456,12 @@ igb_alloc_rbd_ring(igb_rx_ring_t *rx_ring)
*/
ret = ddi_dma_alloc_handle(devinfo, &igb_desc_dma_attr,
DDI_DMA_DONTWAIT, NULL,
- &rx_ring->rbd_area.dma_handle);
+ &rx_data->rbd_area.dma_handle);
if (ret != DDI_SUCCESS) {
igb_error(igb,
"Could not allocate rbd dma handle: %x", ret);
- rx_ring->rbd_area.dma_handle = NULL;
+ rx_data->rbd_area.dma_handle = NULL;
return (IGB_FAILURE);
}
@@ -357,20 +469,20 @@ igb_alloc_rbd_ring(igb_rx_ring_t *rx_ring)
* Allocate memory to DMA data to and from the receive
* descriptors.
*/
- ret = ddi_dma_mem_alloc(rx_ring->rbd_area.dma_handle,
+ ret = ddi_dma_mem_alloc(rx_data->rbd_area.dma_handle,
size, &igb_desc_acc_attr, DDI_DMA_CONSISTENT,
DDI_DMA_DONTWAIT, NULL,
- (caddr_t *)&rx_ring->rbd_area.address,
- &len, &rx_ring->rbd_area.acc_handle);
+ (caddr_t *)&rx_data->rbd_area.address,
+ &len, &rx_data->rbd_area.acc_handle);
if (ret != DDI_SUCCESS) {
igb_error(igb,
"Could not allocate rbd dma memory: %x", ret);
- rx_ring->rbd_area.acc_handle = NULL;
- rx_ring->rbd_area.address = NULL;
- if (rx_ring->rbd_area.dma_handle != NULL) {
- ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
- rx_ring->rbd_area.dma_handle = NULL;
+ rx_data->rbd_area.acc_handle = NULL;
+ rx_data->rbd_area.address = NULL;
+ if (rx_data->rbd_area.dma_handle != NULL) {
+ ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
+ rx_data->rbd_area.dma_handle = NULL;
}
return (IGB_FAILURE);
}
@@ -378,40 +490,40 @@ igb_alloc_rbd_ring(igb_rx_ring_t *rx_ring)
/*
 * Initialize the entire receive buffer descriptor area to zero
*/
- bzero(rx_ring->rbd_area.address, len);
+ bzero(rx_data->rbd_area.address, len);
/*
* Allocates DMA resources for the memory that was allocated by
* the ddi_dma_mem_alloc call.
*/
- ret = ddi_dma_addr_bind_handle(rx_ring->rbd_area.dma_handle,
- NULL, (caddr_t)rx_ring->rbd_area.address,
+ ret = ddi_dma_addr_bind_handle(rx_data->rbd_area.dma_handle,
+ NULL, (caddr_t)rx_data->rbd_area.address,
len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);
if (ret != DDI_DMA_MAPPED) {
igb_error(igb,
"Could not bind rbd dma resource: %x", ret);
- rx_ring->rbd_area.dma_address = NULL;
- if (rx_ring->rbd_area.acc_handle != NULL) {
- ddi_dma_mem_free(&rx_ring->rbd_area.acc_handle);
- rx_ring->rbd_area.acc_handle = NULL;
- rx_ring->rbd_area.address = NULL;
+ rx_data->rbd_area.dma_address = NULL;
+ if (rx_data->rbd_area.acc_handle != NULL) {
+ ddi_dma_mem_free(&rx_data->rbd_area.acc_handle);
+ rx_data->rbd_area.acc_handle = NULL;
+ rx_data->rbd_area.address = NULL;
}
- if (rx_ring->rbd_area.dma_handle != NULL) {
- ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
- rx_ring->rbd_area.dma_handle = NULL;
+ if (rx_data->rbd_area.dma_handle != NULL) {
+ ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
+ rx_data->rbd_area.dma_handle = NULL;
}
return (IGB_FAILURE);
}
ASSERT(cookie_num == 1);
- rx_ring->rbd_area.dma_address = cookie.dmac_laddress;
- rx_ring->rbd_area.size = len;
+ rx_data->rbd_area.dma_address = cookie.dmac_laddress;
+ rx_data->rbd_area.size = len;
- rx_ring->rbd_ring = (union e1000_adv_rx_desc *)(uintptr_t)
- rx_ring->rbd_area.address;
+ rx_data->rbd_ring = (union e1000_adv_rx_desc *)(uintptr_t)
+ rx_data->rbd_area.address;
return (IGB_SUCCESS);
}
@@ -420,24 +532,24 @@ igb_alloc_rbd_ring(igb_rx_ring_t *rx_ring)
* igb_free_rbd_ring - Free the rx descriptors of one ring.
*/
static void
-igb_free_rbd_ring(igb_rx_ring_t *rx_ring)
+igb_free_rbd_ring(igb_rx_data_t *rx_data)
{
- if (rx_ring->rbd_area.dma_handle != NULL) {
- (void) ddi_dma_unbind_handle(rx_ring->rbd_area.dma_handle);
+ if (rx_data->rbd_area.dma_handle != NULL) {
+ (void) ddi_dma_unbind_handle(rx_data->rbd_area.dma_handle);
}
- if (rx_ring->rbd_area.acc_handle != NULL) {
- ddi_dma_mem_free(&rx_ring->rbd_area.acc_handle);
- rx_ring->rbd_area.acc_handle = NULL;
+ if (rx_data->rbd_area.acc_handle != NULL) {
+ ddi_dma_mem_free(&rx_data->rbd_area.acc_handle);
+ rx_data->rbd_area.acc_handle = NULL;
}
- if (rx_ring->rbd_area.dma_handle != NULL) {
- ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
- rx_ring->rbd_area.dma_handle = NULL;
+ if (rx_data->rbd_area.dma_handle != NULL) {
+ ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
+ rx_data->rbd_area.dma_handle = NULL;
}
- rx_ring->rbd_area.address = NULL;
- rx_ring->rbd_area.dma_address = NULL;
- rx_ring->rbd_area.size = 0;
+ rx_data->rbd_area.address = NULL;
+ rx_data->rbd_area.dma_address = NULL;
+ rx_data->rbd_area.size = 0;
- rx_ring->rbd_ring = NULL;
+ rx_data->rbd_ring = NULL;
}
@@ -515,7 +627,7 @@ igb_alloc_dma_buffer(igb_t *igb,
/*
* igb_free_dma_buffer - Free one allocated area of dma memory and handle
*/
-static void
+void
igb_free_dma_buffer(dma_buffer_t *buf)
{
if (buf->dma_handle != NULL) {
@@ -710,79 +822,31 @@ igb_free_tcb_lists(igb_tx_ring_t *tx_ring)
* of one ring.
*/
static int
-igb_alloc_rcb_lists(igb_rx_ring_t *rx_ring)
+igb_alloc_rcb_lists(igb_rx_data_t *rx_data)
{
int i;
int ret;
rx_control_block_t *rcb;
- igb_t *igb = rx_ring->igb;
+ igb_t *igb = rx_data->rx_ring->igb;
dma_buffer_t *rx_buf;
uint32_t rcb_count;
/*
- * Allocate memory for the work list.
- */
- rx_ring->work_list = kmem_zalloc(sizeof (rx_control_block_t *) *
- rx_ring->ring_size, KM_NOSLEEP);
-
- if (rx_ring->work_list == NULL) {
- igb_error(igb,
- "Could not allocate memory for rx work list");
- return (IGB_FAILURE);
- }
-
- /*
- * Allocate memory for the free list.
- */
- rx_ring->free_list = kmem_zalloc(sizeof (rx_control_block_t *) *
- rx_ring->free_list_size, KM_NOSLEEP);
-
- if (rx_ring->free_list == NULL) {
- kmem_free(rx_ring->work_list,
- sizeof (rx_control_block_t *) * rx_ring->ring_size);
- rx_ring->work_list = NULL;
-
- igb_error(igb,
- "Cound not allocate memory for rx free list");
- return (IGB_FAILURE);
- }
-
- /*
* Allocate memory for the rx control blocks for work list and
* free list.
*/
- rcb_count = rx_ring->ring_size + rx_ring->free_list_size;
- rx_ring->rcb_area =
- kmem_zalloc(sizeof (rx_control_block_t) * rcb_count,
- KM_NOSLEEP);
-
- if (rx_ring->rcb_area == NULL) {
- kmem_free(rx_ring->work_list,
- sizeof (rx_control_block_t *) * rx_ring->ring_size);
- rx_ring->work_list = NULL;
-
- kmem_free(rx_ring->free_list,
- sizeof (rx_control_block_t *) * rx_ring->free_list_size);
- rx_ring->free_list = NULL;
+ rcb_count = rx_data->ring_size + rx_data->free_list_size;
+ rcb = rx_data->rcb_area;
- igb_error(igb,
- "Cound not allocate memory for rx control blocks");
- return (IGB_FAILURE);
- }
-
- /*
- * Allocate dma memory for the rx control blocks
- */
- rcb = rx_ring->rcb_area;
for (i = 0; i < rcb_count; i++, rcb++) {
ASSERT(rcb != NULL);
- if (i < rx_ring->ring_size) {
+ if (i < rx_data->ring_size) {
/* Attach the rx control block to the work list */
- rx_ring->work_list[i] = rcb;
+ rx_data->work_list[i] = rcb;
} else {
/* Attach the rx control block to the free list */
- rx_ring->free_list[i - rx_ring->ring_size] = rcb;
+ rx_data->free_list[i - rx_data->ring_size] = rcb;
}
rx_buf = &rcb->rx_buf;
@@ -798,8 +862,8 @@ igb_alloc_rcb_lists(igb_rx_ring_t *rx_ring)
rx_buf->address += IPHDR_ALIGN_ROOM;
rx_buf->dma_address += IPHDR_ALIGN_ROOM;
- rcb->state = RCB_FREE;
- rcb->rx_ring = (igb_rx_ring_t *)rx_ring;
+ rcb->ref_cnt = 1;
+ rcb->rx_data = (igb_rx_data_t *)rx_data;
rcb->free_rtn.free_func = igb_rx_recycle;
rcb->free_rtn.free_arg = (char *)rcb;
@@ -812,7 +876,7 @@ igb_alloc_rcb_lists(igb_rx_ring_t *rx_ring)
return (IGB_SUCCESS);
alloc_rcb_lists_fail:
- igb_free_rcb_lists(rx_ring);
+ igb_free_rcb_lists(rx_data);
return (IGB_FAILURE);
}
@@ -821,46 +885,38 @@ alloc_rcb_lists_fail:
* igb_free_rcb_lists - Free the receive control blocks of one ring.
*/
static void
-igb_free_rcb_lists(igb_rx_ring_t *rx_ring)
+igb_free_rcb_lists(igb_rx_data_t *rx_data)
{
- int i;
+ igb_t *igb;
rx_control_block_t *rcb;
uint32_t rcb_count;
+ uint32_t ref_cnt;
+ int i;
- rcb = rx_ring->rcb_area;
- if (rcb == NULL)
- return;
+ igb = rx_data->rx_ring->igb;
+
+ mutex_enter(&igb->rx_pending_lock);
+
+ rcb = rx_data->rcb_area;
+ rcb_count = rx_data->ring_size + rx_data->free_list_size;
- rcb_count = rx_ring->ring_size + rx_ring->free_list_size;
for (i = 0; i < rcb_count; i++, rcb++) {
ASSERT(rcb != NULL);
- ASSERT(rcb->state == RCB_FREE);
- if (rcb->mp != NULL) {
- freemsg(rcb->mp);
- rcb->mp = NULL;
+ ref_cnt = atomic_dec_32_nv(&rcb->ref_cnt);
+ if (ref_cnt == 0) {
+ if (rcb->mp != NULL) {
+ freemsg(rcb->mp);
+ rcb->mp = NULL;
+ }
+ igb_free_dma_buffer(&rcb->rx_buf);
+ } else {
+ atomic_inc_32(&rx_data->rcb_pending);
+ atomic_inc_32(&igb->rcb_pending);
}
-
- igb_free_dma_buffer(&rcb->rx_buf);
}
- if (rx_ring->rcb_area != NULL) {
- kmem_free(rx_ring->rcb_area,
- sizeof (rx_control_block_t) * rcb_count);
- rx_ring->rcb_area = NULL;
- }
-
- if (rx_ring->work_list != NULL) {
- kmem_free(rx_ring->work_list,
- sizeof (rx_control_block_t *) * rx_ring->ring_size);
- rx_ring->work_list = NULL;
- }
-
- if (rx_ring->free_list != NULL) {
- kmem_free(rx_ring->free_list,
- sizeof (rx_control_block_t *) * rx_ring->free_list_size);
- rx_ring->free_list = NULL;
- }
+ mutex_exit(&igb->rx_pending_lock);
}
void
diff --git a/usr/src/uts/common/io/igb/igb_gld.c b/usr/src/uts/common/io/igb/igb_gld.c
index 9fddd30f61..becf960af5 100644
--- a/usr/src/uts/common/io/igb/igb_gld.c
+++ b/usr/src/uts/common/io/igb/igb_gld.c
@@ -24,7 +24,7 @@
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -451,7 +451,7 @@ igb_m_start(void *arg)
return (ECANCELED);
}
- if (igb_start(igb) != IGB_SUCCESS) {
+ if (igb_start(igb, B_TRUE) != IGB_SUCCESS) {
mutex_exit(&igb->gen_lock);
return (EIO);
}
@@ -486,7 +486,7 @@ igb_m_stop(void *arg)
atomic_and_32(&igb->igb_state, ~IGB_STARTED);
- igb_stop(igb);
+ igb_stop(igb, B_TRUE);
mutex_exit(&igb->gen_lock);
@@ -570,6 +570,14 @@ igb_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
iocp->ioc_error = 0;
+ mutex_enter(&igb->gen_lock);
+ if (igb->igb_state & IGB_SUSPENDED) {
+ mutex_exit(&igb->gen_lock);
+ miocnak(q, mp, 0, EINVAL);
+ return;
+ }
+ mutex_exit(&igb->gen_lock);
+
switch (iocp->ioc_cmd) {
case LB_GET_INFO_SIZE:
case LB_GET_INFO:
@@ -578,11 +586,6 @@ igb_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
status = igb_loopback_ioctl(igb, iocp, mp);
break;
- case ND_GET:
- case ND_SET:
- status = igb_nd_ioctl(igb, q, mp, iocp);
- break;
-
default:
status = IOC_INVAL;
break;
@@ -972,3 +975,630 @@ igb_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
}
return (B_TRUE);
}
+
+int
+igb_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
+ uint_t pr_valsize, const void *pr_val)
+{
+ igb_t *igb = (igb_t *)arg;
+ struct e1000_hw *hw = &igb->hw;
+ int err = 0;
+ uint32_t flow_control;
+ uint32_t cur_mtu, new_mtu;
+ uint32_t rx_size;
+ uint32_t tx_size;
+
+ mutex_enter(&igb->gen_lock);
+ if (igb->igb_state & IGB_SUSPENDED) {
+ mutex_exit(&igb->gen_lock);
+ return (ECANCELED);
+ }
+
+ if (igb->loopback_mode != IGB_LB_NONE && igb_param_locked(pr_num)) {
+ /*
+ * All en_* parameters are locked (read-only)
+ * while the device is in any sort of loopback mode.
+ */
+ mutex_exit(&igb->gen_lock);
+ return (EBUSY);
+ }
+
+ switch (pr_num) {
+ case MAC_PROP_EN_1000FDX_CAP:
+ /* read/write on copper, read-only on serdes */
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ err = ENOTSUP;
+ break;
+ }
+ igb->param_en_1000fdx_cap = *(uint8_t *)pr_val;
+ igb->param_adv_1000fdx_cap = *(uint8_t *)pr_val;
+ goto setup_link;
+ case MAC_PROP_EN_100FDX_CAP:
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ err = ENOTSUP;
+ break;
+ }
+ igb->param_en_100fdx_cap = *(uint8_t *)pr_val;
+ igb->param_adv_100fdx_cap = *(uint8_t *)pr_val;
+ goto setup_link;
+ case MAC_PROP_EN_100HDX_CAP:
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ err = ENOTSUP;
+ break;
+ }
+ igb->param_en_100hdx_cap = *(uint8_t *)pr_val;
+ igb->param_adv_100hdx_cap = *(uint8_t *)pr_val;
+ goto setup_link;
+ case MAC_PROP_EN_10FDX_CAP:
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ err = ENOTSUP;
+ break;
+ }
+ igb->param_en_10fdx_cap = *(uint8_t *)pr_val;
+ igb->param_adv_10fdx_cap = *(uint8_t *)pr_val;
+ goto setup_link;
+ case MAC_PROP_EN_10HDX_CAP:
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ err = ENOTSUP;
+ break;
+ }
+ igb->param_en_10hdx_cap = *(uint8_t *)pr_val;
+ igb->param_adv_10hdx_cap = *(uint8_t *)pr_val;
+ goto setup_link;
+ case MAC_PROP_AUTONEG:
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ err = ENOTSUP;
+ break;
+ }
+ igb->param_adv_autoneg_cap = *(uint8_t *)pr_val;
+ goto setup_link;
+ case MAC_PROP_FLOWCTRL:
+ bcopy(pr_val, &flow_control, sizeof (flow_control));
+
+ switch (flow_control) {
+ default:
+ err = EINVAL;
+ break;
+ case LINK_FLOWCTRL_NONE:
+ hw->fc.requested_mode = e1000_fc_none;
+ break;
+ case LINK_FLOWCTRL_RX:
+ hw->fc.requested_mode = e1000_fc_rx_pause;
+ break;
+ case LINK_FLOWCTRL_TX:
+ hw->fc.requested_mode = e1000_fc_tx_pause;
+ break;
+ case LINK_FLOWCTRL_BI:
+ hw->fc.requested_mode = e1000_fc_full;
+ break;
+ }
+setup_link:
+ if (err == 0) {
+ if (igb_setup_link(igb, B_TRUE) != IGB_SUCCESS)
+ err = EINVAL;
+ }
+ break;
+ case MAC_PROP_ADV_1000FDX_CAP:
+ case MAC_PROP_ADV_1000HDX_CAP:
+ case MAC_PROP_ADV_100T4_CAP:
+ case MAC_PROP_ADV_100FDX_CAP:
+ case MAC_PROP_ADV_100HDX_CAP:
+ case MAC_PROP_ADV_10FDX_CAP:
+ case MAC_PROP_ADV_10HDX_CAP:
+ case MAC_PROP_EN_1000HDX_CAP:
+ case MAC_PROP_EN_100T4_CAP:
+ case MAC_PROP_STATUS:
+ case MAC_PROP_SPEED:
+ case MAC_PROP_DUPLEX:
+ err = ENOTSUP; /* read-only prop. Can't set this. */
+ break;
+ case MAC_PROP_MTU:
+ /* adapter must be stopped for an MTU change */
+ if (igb->igb_state & IGB_STARTED) {
+ err = EBUSY;
+ break;
+ }
+
+ cur_mtu = igb->default_mtu;
+ bcopy(pr_val, &new_mtu, sizeof (new_mtu));
+ if (new_mtu == cur_mtu) {
+ err = 0;
+ break;
+ }
+
+ if (new_mtu < MIN_MTU || new_mtu > MAX_MTU) {
+ err = EINVAL;
+ break;
+ }
+
+ err = mac_maxsdu_update(igb->mac_hdl, new_mtu);
+ if (err == 0) {
+ igb->default_mtu = new_mtu;
+ igb->max_frame_size = igb->default_mtu +
+ sizeof (struct ether_vlan_header) + ETHERFCSL;
+
+ /*
+ * Set rx buffer size
+ */
+ rx_size = igb->max_frame_size + IPHDR_ALIGN_ROOM;
+ igb->rx_buf_size = ((rx_size >> 10) + ((rx_size &
+ (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
+
+ /*
+ * Set tx buffer size
+ */
+ tx_size = igb->max_frame_size;
+ igb->tx_buf_size = ((tx_size >> 10) + ((tx_size &
+ (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
+ }
+ break;
+ case MAC_PROP_PRIVATE:
+ err = igb_set_priv_prop(igb, pr_name, pr_valsize, pr_val);
+ break;
+ default:
+ err = EINVAL;
+ break;
+ }
+
+ mutex_exit(&igb->gen_lock);
+
+ if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
+ ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
+ return (EIO);
+ }
+
+ return (err);
+}
+
+int
+igb_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
+ uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
+{
+ igb_t *igb = (igb_t *)arg;
+ struct e1000_hw *hw = &igb->hw;
+ int err = 0;
+ uint32_t flow_control;
+ uint64_t tmp = 0;
+ mac_propval_range_t range;
+
+ if (pr_valsize == 0)
+ return (EINVAL);
+
+ *perm = MAC_PROP_PERM_RW;
+
+ bzero(pr_val, pr_valsize);
+ if ((pr_flags & MAC_PROP_DEFAULT) && (pr_num != MAC_PROP_PRIVATE))
+ return (igb_get_def_val(igb, pr_num, pr_valsize, pr_val));
+
+ switch (pr_num) {
+ case MAC_PROP_DUPLEX:
+ *perm = MAC_PROP_PERM_READ;
+ if (pr_valsize >= sizeof (link_duplex_t)) {
+ bcopy(&igb->link_duplex, pr_val,
+ sizeof (link_duplex_t));
+ } else
+ err = EINVAL;
+ break;
+ case MAC_PROP_SPEED:
+ *perm = MAC_PROP_PERM_READ;
+ if (pr_valsize >= sizeof (uint64_t)) {
+ tmp = igb->link_speed * 1000000ull;
+ bcopy(&tmp, pr_val, sizeof (tmp));
+ } else
+ err = EINVAL;
+ break;
+ case MAC_PROP_AUTONEG:
+ if (hw->phy.media_type != e1000_media_type_copper)
+ *perm = MAC_PROP_PERM_READ;
+ *(uint8_t *)pr_val = igb->param_adv_autoneg_cap;
+ break;
+ case MAC_PROP_FLOWCTRL:
+ if (pr_valsize >= sizeof (uint32_t)) {
+ switch (hw->fc.requested_mode) {
+ case e1000_fc_none:
+ flow_control = LINK_FLOWCTRL_NONE;
+ break;
+ case e1000_fc_rx_pause:
+ flow_control = LINK_FLOWCTRL_RX;
+ break;
+ case e1000_fc_tx_pause:
+ flow_control = LINK_FLOWCTRL_TX;
+ break;
+ case e1000_fc_full:
+ flow_control = LINK_FLOWCTRL_BI;
+ break;
+ }
+ bcopy(&flow_control, pr_val, sizeof (flow_control));
+ } else
+ err = EINVAL;
+ break;
+ case MAC_PROP_ADV_1000FDX_CAP:
+ *perm = MAC_PROP_PERM_READ;
+ *(uint8_t *)pr_val = igb->param_adv_1000fdx_cap;
+ break;
+ case MAC_PROP_EN_1000FDX_CAP:
+ if (hw->phy.media_type != e1000_media_type_copper)
+ *perm = MAC_PROP_PERM_READ;
+ *(uint8_t *)pr_val = igb->param_en_1000fdx_cap;
+ break;
+ case MAC_PROP_ADV_1000HDX_CAP:
+ *perm = MAC_PROP_PERM_READ;
+ *(uint8_t *)pr_val = igb->param_adv_1000hdx_cap;
+ break;
+ case MAC_PROP_EN_1000HDX_CAP:
+ *perm = MAC_PROP_PERM_READ;
+ *(uint8_t *)pr_val = igb->param_en_1000hdx_cap;
+ break;
+ case MAC_PROP_ADV_100T4_CAP:
+ *perm = MAC_PROP_PERM_READ;
+ *(uint8_t *)pr_val = igb->param_adv_100t4_cap;
+ break;
+ case MAC_PROP_EN_100T4_CAP:
+ *perm = MAC_PROP_PERM_READ;
+ *(uint8_t *)pr_val = igb->param_en_100t4_cap;
+ break;
+ case MAC_PROP_ADV_100FDX_CAP:
+ *perm = MAC_PROP_PERM_READ;
+ *(uint8_t *)pr_val = igb->param_adv_100fdx_cap;
+ break;
+ case MAC_PROP_EN_100FDX_CAP:
+ if (hw->phy.media_type != e1000_media_type_copper)
+ *perm = MAC_PROP_PERM_READ;
+ *(uint8_t *)pr_val = igb->param_en_100fdx_cap;
+ break;
+ case MAC_PROP_ADV_100HDX_CAP:
+ *perm = MAC_PROP_PERM_READ;
+ *(uint8_t *)pr_val = igb->param_adv_100hdx_cap;
+ break;
+ case MAC_PROP_EN_100HDX_CAP:
+ if (hw->phy.media_type != e1000_media_type_copper)
+ *perm = MAC_PROP_PERM_READ;
+ *(uint8_t *)pr_val = igb->param_en_100hdx_cap;
+ break;
+ case MAC_PROP_ADV_10FDX_CAP:
+ *perm = MAC_PROP_PERM_READ;
+ *(uint8_t *)pr_val = igb->param_adv_10fdx_cap;
+ break;
+ case MAC_PROP_EN_10FDX_CAP:
+ if (hw->phy.media_type != e1000_media_type_copper)
+ *perm = MAC_PROP_PERM_READ;
+ *(uint8_t *)pr_val = igb->param_en_10fdx_cap;
+ break;
+ case MAC_PROP_ADV_10HDX_CAP:
+ *perm = MAC_PROP_PERM_READ;
+ *(uint8_t *)pr_val = igb->param_adv_10hdx_cap;
+ break;
+ case MAC_PROP_EN_10HDX_CAP:
+ if (hw->phy.media_type != e1000_media_type_copper)
+ *perm = MAC_PROP_PERM_READ;
+ *(uint8_t *)pr_val = igb->param_en_10hdx_cap;
+ break;
+ case MAC_PROP_PRIVATE:
+ err = igb_get_priv_prop(igb, pr_name,
+ pr_flags, pr_valsize, pr_val, perm);
+ break;
+ case MAC_PROP_MTU:
+ if (!(pr_flags & MAC_PROP_POSSIBLE))
+ return (ENOTSUP);
+ if (pr_valsize < sizeof (mac_propval_range_t))
+ return (EINVAL);
+ range.mpr_count = 1;
+ range.mpr_type = MAC_PROPVAL_UINT32;
+ range.range_uint32[0].mpur_min = MIN_MTU;
+ range.range_uint32[0].mpur_max = MAX_MTU;
+ bcopy(&range, pr_val, sizeof (range));
+ break;
+ default:
+ err = EINVAL;
+ break;
+ }
+ return (err);
+}
+
+int
+igb_get_def_val(igb_t *igb, mac_prop_id_t pr_num,
+ uint_t pr_valsize, void *pr_val)
+{
+ uint32_t flow_control;
+ struct e1000_hw *hw = &igb->hw;
+ uint16_t phy_status;
+ uint16_t phy_ext_status;
+ int err = 0;
+
+ ASSERT(pr_valsize > 0);
+ switch (pr_num) {
+ case MAC_PROP_AUTONEG:
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ *(uint8_t *)pr_val = 0;
+ } else {
+ (void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
+ *(uint8_t *)pr_val =
+ (phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0;
+ }
+ break;
+ case MAC_PROP_FLOWCTRL:
+ if (pr_valsize < sizeof (uint32_t))
+ return (EINVAL);
+ flow_control = LINK_FLOWCTRL_BI;
+ bcopy(&flow_control, pr_val, sizeof (flow_control));
+ break;
+ case MAC_PROP_ADV_1000FDX_CAP:
+ case MAC_PROP_EN_1000FDX_CAP:
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ *(uint8_t *)pr_val = 1;
+ } else {
+ (void) e1000_read_phy_reg(hw,
+ PHY_EXT_STATUS, &phy_ext_status);
+ *(uint8_t *)pr_val =
+ ((phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
+ (phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0;
+ }
+ break;
+ case MAC_PROP_ADV_1000HDX_CAP:
+ case MAC_PROP_EN_1000HDX_CAP:
+ case MAC_PROP_ADV_100T4_CAP:
+ case MAC_PROP_EN_100T4_CAP:
+ *(uint8_t *)pr_val = 0;
+ break;
+ case MAC_PROP_ADV_100FDX_CAP:
+ case MAC_PROP_EN_100FDX_CAP:
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ *(uint8_t *)pr_val = 0;
+ } else {
+ (void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
+ *(uint8_t *)pr_val =
+ ((phy_status & MII_SR_100X_FD_CAPS) ||
+ (phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0;
+ }
+ break;
+ case MAC_PROP_ADV_100HDX_CAP:
+ case MAC_PROP_EN_100HDX_CAP:
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ *(uint8_t *)pr_val = 0;
+ } else {
+ (void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
+ *(uint8_t *)pr_val =
+ ((phy_status & MII_SR_100X_HD_CAPS) ||
+ (phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0;
+ }
+ break;
+ case MAC_PROP_ADV_10FDX_CAP:
+ case MAC_PROP_EN_10FDX_CAP:
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ *(uint8_t *)pr_val = 0;
+ } else {
+ (void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
+ *(uint8_t *)pr_val =
+ (phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0;
+ }
+ break;
+ case MAC_PROP_ADV_10HDX_CAP:
+ case MAC_PROP_EN_10HDX_CAP:
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ *(uint8_t *)pr_val = 0;
+ } else {
+ (void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
+ *(uint8_t *)pr_val =
+ (phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0;
+ }
+ break;
+ default:
+ err = ENOTSUP;
+ break;
+ }
+ return (err);
+}
+
+boolean_t
+igb_param_locked(mac_prop_id_t pr_num)
+{
+ /*
+ * All en_* parameters are locked (read-only) while
+ * the device is in any sort of loopback mode ...
+ */
+ switch (pr_num) {
+ case MAC_PROP_EN_1000FDX_CAP:
+ case MAC_PROP_EN_1000HDX_CAP:
+ case MAC_PROP_EN_100T4_CAP:
+ case MAC_PROP_EN_100FDX_CAP:
+ case MAC_PROP_EN_100HDX_CAP:
+ case MAC_PROP_EN_10FDX_CAP:
+ case MAC_PROP_EN_10HDX_CAP:
+ case MAC_PROP_AUTONEG:
+ case MAC_PROP_FLOWCTRL:
+ return (B_TRUE);
+ }
+ return (B_FALSE);
+}
+
+/* ARGSUSED */
+int
+igb_set_priv_prop(igb_t *igb, const char *pr_name,
+ uint_t pr_valsize, const void *pr_val)
+{
+ int err = 0;
+ long result;
+ struct e1000_hw *hw = &igb->hw;
+ int i;
+
+ if (strcmp(pr_name, "_tx_copy_thresh") == 0) {
+ if (pr_val == NULL) {
+ err = EINVAL;
+ return (err);
+ }
+ (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
+ if (result < MIN_TX_COPY_THRESHOLD ||
+ result > MAX_TX_COPY_THRESHOLD)
+ err = EINVAL;
+ else {
+ igb->tx_copy_thresh = (uint32_t)result;
+ }
+ return (err);
+ }
+ if (strcmp(pr_name, "_tx_recycle_thresh") == 0) {
+ if (pr_val == NULL) {
+ err = EINVAL;
+ return (err);
+ }
+ (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
+ if (result < MIN_TX_RECYCLE_THRESHOLD ||
+ result > MAX_TX_RECYCLE_THRESHOLD)
+ err = EINVAL;
+ else {
+ igb->tx_recycle_thresh = (uint32_t)result;
+ }
+ return (err);
+ }
+ if (strcmp(pr_name, "_tx_overload_thresh") == 0) {
+ if (pr_val == NULL) {
+ err = EINVAL;
+ return (err);
+ }
+ (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
+ if (result < MIN_TX_OVERLOAD_THRESHOLD ||
+ result > MAX_TX_OVERLOAD_THRESHOLD)
+ err = EINVAL;
+ else {
+ igb->tx_overload_thresh = (uint32_t)result;
+ }
+ return (err);
+ }
+ if (strcmp(pr_name, "_tx_resched_thresh") == 0) {
+ if (pr_val == NULL) {
+ err = EINVAL;
+ return (err);
+ }
+ (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
+ if (result < MIN_TX_RESCHED_THRESHOLD ||
+ result > MAX_TX_RESCHED_THRESHOLD)
+ err = EINVAL;
+ else {
+ igb->tx_resched_thresh = (uint32_t)result;
+ }
+ return (err);
+ }
+ if (strcmp(pr_name, "_rx_copy_thresh") == 0) {
+ if (pr_val == NULL) {
+ err = EINVAL;
+ return (err);
+ }
+ (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
+ if (result < MIN_RX_COPY_THRESHOLD ||
+ result > MAX_RX_COPY_THRESHOLD)
+ err = EINVAL;
+ else {
+ igb->rx_copy_thresh = (uint32_t)result;
+ }
+ return (err);
+ }
+ if (strcmp(pr_name, "_rx_limit_per_intr") == 0) {
+ if (pr_val == NULL) {
+ err = EINVAL;
+ return (err);
+ }
+ (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
+ if (result < MIN_RX_LIMIT_PER_INTR ||
+ result > MAX_RX_LIMIT_PER_INTR)
+ err = EINVAL;
+ else {
+ igb->rx_limit_per_intr = (uint32_t)result;
+ }
+ return (err);
+ }
+ if (strcmp(pr_name, "_intr_throttling") == 0) {
+ if (pr_val == NULL) {
+ err = EINVAL;
+ return (err);
+ }
+ (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
+
+ if (result < igb->capab->min_intr_throttle ||
+ result > igb->capab->max_intr_throttle)
+ err = EINVAL;
+ else {
+ igb->intr_throttling[0] = (uint32_t)result;
+
+ for (i = 0; i < MAX_NUM_EITR; i++)
+ igb->intr_throttling[i] =
+ igb->intr_throttling[0];
+
+ /* Set interrupt throttling rate */
+ for (i = 0; i < igb->intr_cnt; i++)
+ E1000_WRITE_REG(hw, E1000_EITR(i),
+ igb->intr_throttling[i]);
+ }
+ return (err);
+ }
+ return (ENOTSUP);
+}
+
+int
+igb_get_priv_prop(igb_t *igb, const char *pr_name,
+ uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
+{
+ int err = ENOTSUP;
+ boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);
+ int value;
+
+ *perm = MAC_PROP_PERM_RW;
+
+ if (strcmp(pr_name, "_adv_pause_cap") == 0) {
+ *perm = MAC_PROP_PERM_READ;
+ value = (is_default ? 1 : igb->param_adv_pause_cap);
+ err = 0;
+ goto done;
+ }
+ if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
+ *perm = MAC_PROP_PERM_READ;
+ value = (is_default ? 1 : igb->param_adv_asym_pause_cap);
+ err = 0;
+ goto done;
+ }
+ if (strcmp(pr_name, "_tx_copy_thresh") == 0) {
+ value = (is_default ? DEFAULT_TX_COPY_THRESHOLD :
+ igb->tx_copy_thresh);
+ err = 0;
+ goto done;
+ }
+ if (strcmp(pr_name, "_tx_recycle_thresh") == 0) {
+ value = (is_default ? DEFAULT_TX_RECYCLE_THRESHOLD :
+ igb->tx_recycle_thresh);
+ err = 0;
+ goto done;
+ }
+ if (strcmp(pr_name, "_tx_overload_thresh") == 0) {
+ value = (is_default ? DEFAULT_TX_OVERLOAD_THRESHOLD :
+ igb->tx_overload_thresh);
+ err = 0;
+ goto done;
+ }
+ if (strcmp(pr_name, "_tx_resched_thresh") == 0) {
+ value = (is_default ? DEFAULT_TX_RESCHED_THRESHOLD :
+ igb->tx_resched_thresh);
+ err = 0;
+ goto done;
+ }
+ if (strcmp(pr_name, "_rx_copy_thresh") == 0) {
+ value = (is_default ? DEFAULT_RX_COPY_THRESHOLD :
+ igb->rx_copy_thresh);
+ err = 0;
+ goto done;
+ }
+ if (strcmp(pr_name, "_rx_limit_per_intr") == 0) {
+ value = (is_default ? DEFAULT_RX_LIMIT_PER_INTR :
+ igb->rx_limit_per_intr);
+ err = 0;
+ goto done;
+ }
+ if (strcmp(pr_name, "_intr_throttling") == 0) {
+ value = (is_default ? igb->capab->def_intr_throttle :
+ igb->intr_throttling[0]);
+ err = 0;
+ goto done;
+ }
+done:
+ if (err == 0) {
+ (void) snprintf(pr_val, pr_valsize, "%d", value);
+ }
+ return (err);
+}
diff --git a/usr/src/uts/common/io/igb/igb_main.c b/usr/src/uts/common/io/igb/igb_main.c
index 5939625e28..635930b5be 100644
--- a/usr/src/uts/common/io/igb/igb_main.c
+++ b/usr/src/uts/common/io/igb/igb_main.c
@@ -22,14 +22,14 @@
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include "igb_sw.h"
static char ident[] = "Intel 1Gb Ethernet";
-static char igb_version[] = "igb 1.1.10";
+static char igb_version[] = "igb 1.1.11";
/*
* Local function protoypes
@@ -50,6 +50,8 @@ static void igb_tx_clean(igb_t *);
static boolean_t igb_tx_drain(igb_t *);
static boolean_t igb_rx_drain(igb_t *);
static int igb_alloc_rings(igb_t *);
+static int igb_alloc_rx_data(igb_t *);
+static void igb_free_rx_data(igb_t *);
static void igb_free_rings(igb_t *);
static void igb_setup_rings(igb_t *);
static void igb_setup_rx(igb_t *);
@@ -62,6 +64,7 @@ static void igb_setup_mac_classify(igb_t *);
static void igb_init_unicst(igb_t *);
static void igb_setup_multicst(igb_t *);
static void igb_get_phy_state(igb_t *);
+static void igb_param_sync(igb_t *);
static void igb_get_conf(igb_t *);
static int igb_get_prop(igb_t *, char *, int, int, int);
static boolean_t igb_is_link_up(igb_t *);
@@ -119,6 +122,21 @@ static void igb_fm_init(igb_t *);
static void igb_fm_fini(igb_t *);
static void igb_release_multicast(igb_t *);
+mac_priv_prop_t igb_priv_props[] = {
+ {"_tx_copy_thresh", MAC_PROP_PERM_RW},
+ {"_tx_recycle_thresh", MAC_PROP_PERM_RW},
+ {"_tx_overload_thresh", MAC_PROP_PERM_RW},
+ {"_tx_resched_thresh", MAC_PROP_PERM_RW},
+ {"_rx_copy_thresh", MAC_PROP_PERM_RW},
+ {"_rx_limit_per_intr", MAC_PROP_PERM_RW},
+ {"_intr_throttling", MAC_PROP_PERM_RW},
+ {"_adv_pause_cap", MAC_PROP_PERM_READ},
+ {"_adv_asym_pause_cap", MAC_PROP_PERM_READ}
+};
+
+#define IGB_MAX_PRIV_PROPS \
+ (sizeof (igb_priv_props) / sizeof (mac_priv_prop_t))
+
static struct cb_ops igb_cb_ops = {
nulldev, /* cb_open */
nulldev, /* cb_close */
@@ -173,7 +191,8 @@ ddi_device_acc_attr_t igb_regs_acc_attr = {
DDI_FLAGERR_ACC
};
-#define IGB_M_CALLBACK_FLAGS (MC_IOCTL | MC_GETCAPAB)
+#define IGB_M_CALLBACK_FLAGS \
+ (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)
static mac_callbacks_t igb_m_callbacks = {
IGB_M_CALLBACK_FLAGS,
@@ -185,7 +204,11 @@ static mac_callbacks_t igb_m_callbacks = {
NULL,
NULL,
igb_m_ioctl,
- igb_m_getcapab
+ igb_m_getcapab,
+ NULL,
+ NULL,
+ igb_m_setprop,
+ igb_m_getprop
};
/*
@@ -455,16 +478,7 @@ igb_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
igb->attach_progress |= ATTACH_PROGRESS_LOCKS;
/*
- * Allocate DMA resources
- */
- if (igb_alloc_dma(igb) != IGB_SUCCESS) {
- igb_error(igb, "Failed to allocate DMA resources");
- goto attach_fail;
- }
- igb->attach_progress |= ATTACH_PROGRESS_ALLOC_DMA;
-
- /*
- * Initialize the adapter and setup the rx/tx rings
+ * Initialize the adapter
*/
if (igb_init(igb) != IGB_SUCCESS) {
igb_error(igb, "Failed to initialize adapter");
@@ -482,15 +496,6 @@ igb_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
igb->attach_progress |= ATTACH_PROGRESS_STATS;
/*
- * Initialize NDD parameters
- */
- if (igb_nd_init(igb) != IGB_SUCCESS) {
- igb_error(igb, "Failed to initialize ndd");
- goto attach_fail;
- }
- igb->attach_progress |= ATTACH_PROGRESS_NDD;
-
- /*
* Register the driver to the MAC
*/
if (igb_register_mac(igb) != IGB_SUCCESS) {
@@ -579,7 +584,7 @@ igb_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
mutex_enter(&igb->gen_lock);
if (igb->igb_state & IGB_STARTED) {
atomic_and_32(&igb->igb_state, ~IGB_STARTED);
- igb_stop(igb);
+ igb_stop(igb, B_TRUE);
mutex_exit(&igb->gen_lock);
/* Disable and stop the watchdog timer */
igb_disable_watchdog_timer(igb);
@@ -667,13 +672,6 @@ igb_unconfigure(dev_info_t *devinfo, igb_t *igb)
}
/*
- * Free ndd parameters
- */
- if (igb->attach_progress & ATTACH_PROGRESS_NDD) {
- igb_nd_cleanup(igb);
- }
-
- /*
* Free statistics
*/
if (igb->attach_progress & ATTACH_PROGRESS_STATS) {
@@ -702,13 +700,6 @@ igb_unconfigure(dev_info_t *devinfo, igb_t *igb)
}
/*
- * Release the DMA resources of rx/tx rings
- */
- if (igb->attach_progress & ATTACH_PROGRESS_ALLOC_DMA) {
- igb_free_dma(igb);
- }
-
- /*
* Stop the adapter
*/
if (igb->attach_progress & ATTACH_PROGRESS_INIT_ADAPTER) {
@@ -792,6 +783,8 @@ igb_register_mac(igb_t *igb)
mac->m_max_sdu = igb->max_frame_size -
sizeof (struct ether_vlan_header) - ETHERFCSL;
mac->m_margin = VLAN_TAGSZ;
+ mac->m_priv_props = igb_priv_props;
+ mac->m_priv_prop_count = IGB_MAX_PRIV_PROPS;
mac->m_v12n = MAC_VIRT_LEVEL1;
status = mac_register(mac, &igb->mac_hdl);
@@ -956,11 +949,6 @@ igb_init_driver_settings(igb_t *igb)
rx_ring = &igb->rx_rings[i];
rx_ring->index = i;
rx_ring->igb = igb;
-
- rx_ring->ring_size = igb->rx_ring_size;
- rx_ring->free_list_size = igb->rx_ring_size;
- rx_ring->copy_thresh = igb->rx_copy_thresh;
- rx_ring->limit_per_intr = igb->rx_limit_per_intr;
}
for (i = 0; i < igb->num_tx_rings; i++) {
@@ -975,10 +963,6 @@ igb_init_driver_settings(igb_t *igb)
tx_ring->ring_size = igb->tx_ring_size;
tx_ring->free_list_size = igb->tx_ring_size +
(igb->tx_ring_size >> 1);
- tx_ring->copy_thresh = igb->tx_copy_thresh;
- tx_ring->recycle_thresh = igb->tx_recycle_thresh;
- tx_ring->overload_thresh = igb->tx_overload_thresh;
- tx_ring->resched_thresh = igb->tx_resched_thresh;
}
/*
@@ -1009,8 +993,6 @@ igb_init_locks(igb_t *igb)
rx_ring = &igb->rx_rings[i];
mutex_init(&rx_ring->rx_lock, NULL,
MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));
- mutex_init(&rx_ring->recycle_lock, NULL,
- MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));
}
for (i = 0; i < igb->num_tx_rings; i++) {
@@ -1048,7 +1030,6 @@ igb_destroy_locks(igb_t *igb)
for (i = 0; i < igb->num_rx_rings; i++) {
rx_ring = &igb->rx_rings[i];
mutex_destroy(&rx_ring->rx_lock);
- mutex_destroy(&rx_ring->recycle_lock);
}
for (i = 0; i < igb->num_tx_rings; i++) {
@@ -1076,7 +1057,7 @@ igb_resume(dev_info_t *devinfo)
mutex_enter(&igb->gen_lock);
if (igb->igb_state & IGB_STARTED) {
- if (igb_start(igb) != IGB_SUCCESS) {
+ if (igb_start(igb, B_FALSE) != IGB_SUCCESS) {
mutex_exit(&igb->gen_lock);
return (DDI_FAILURE);
}
@@ -1112,7 +1093,7 @@ igb_suspend(dev_info_t *devinfo)
return (DDI_SUCCESS);
}
- igb_stop(igb);
+ igb_stop(igb, B_FALSE);
mutex_exit(&igb->gen_lock);
@@ -1127,8 +1108,6 @@ igb_suspend(dev_info_t *devinfo)
static int
igb_init(igb_t *igb)
{
- int i;
-
mutex_enter(&igb->gen_lock);
/*
@@ -1141,26 +1120,6 @@ igb_init(igb_t *igb)
return (IGB_FAILURE);
}
- /*
- * Setup the rx/tx rings
- */
- for (i = 0; i < igb->num_rx_rings; i++)
- mutex_enter(&igb->rx_rings[i].rx_lock);
- for (i = 0; i < igb->num_tx_rings; i++)
- mutex_enter(&igb->tx_rings[i].tx_lock);
-
- igb_setup_rings(igb);
-
- for (i = igb->num_tx_rings - 1; i >= 0; i--)
- mutex_exit(&igb->tx_rings[i].tx_lock);
- for (i = igb->num_rx_rings - 1; i >= 0; i--)
- mutex_exit(&igb->rx_rings[i].rx_lock);
-
- if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
- ddi_fm_service_impact(igb->dip, DDI_SERVICE_LOST);
- return (IGB_FAILURE);
- }
-
mutex_exit(&igb->gen_lock);
return (IGB_SUCCESS);
@@ -1389,6 +1348,8 @@ igb_init_adapter(igb_t *igb)
*/
igb_get_phy_state(igb);
+ igb_param_sync(igb);
+
return (IGB_SUCCESS);
init_adapter_fail:
@@ -1483,6 +1444,7 @@ igb_reset(igb_t *igb)
/*
* Setup the rx/tx rings
*/
+ igb->tx_ring_init = B_FALSE;
igb_setup_rings(igb);
atomic_and_32(&igb->igb_state, ~(IGB_ERROR | IGB_STALL));
@@ -1634,9 +1596,8 @@ igb_tx_drain(igb_t *igb)
static boolean_t
igb_rx_drain(igb_t *igb)
{
- igb_rx_ring_t *rx_ring;
boolean_t done;
- int i, j;
+ int i;
/*
* Polling the rx free list to check if those rx buffers held by
@@ -1649,13 +1610,7 @@ igb_rx_drain(igb_t *igb)
* Otherwise return B_FALSE;
*/
for (i = 0; i < RX_DRAIN_TIME; i++) {
-
- done = B_TRUE;
- for (j = 0; j < igb->num_rx_rings; j++) {
- rx_ring = &igb->rx_rings[j];
- done = done &&
- (rx_ring->rcb_free == rx_ring->free_list_size);
- }
+ done = (igb->rcb_pending == 0);
if (done)
break;
@@ -1670,12 +1625,30 @@ igb_rx_drain(igb_t *igb)
* igb_start - Start the driver/chipset
*/
int
-igb_start(igb_t *igb)
+igb_start(igb_t *igb, boolean_t alloc_buffer)
{
int i;
ASSERT(mutex_owned(&igb->gen_lock));
+ if (alloc_buffer) {
+ if (igb_alloc_rx_data(igb) != IGB_SUCCESS) {
+ igb_error(igb,
+ "Failed to allocate software receive rings");
+ return (IGB_FAILURE);
+ }
+
+ /* Allocate buffers for all the rx/tx rings */
+ if (igb_alloc_dma(igb) != IGB_SUCCESS) {
+ igb_error(igb, "Failed to allocate DMA resource");
+ return (IGB_FAILURE);
+ }
+
+ igb->tx_ring_init = B_TRUE;
+ } else {
+ igb->tx_ring_init = B_FALSE;
+ }
+
for (i = 0; i < igb->num_rx_rings; i++)
mutex_enter(&igb->rx_rings[i].rx_lock);
for (i = 0; i < igb->num_tx_rings; i++)
@@ -1690,14 +1663,14 @@ igb_start(igb_t *igb)
goto start_failure;
}
igb->attach_progress |= ATTACH_PROGRESS_INIT_ADAPTER;
-
- /*
- * Setup the rx/tx rings
- */
- igb_setup_rings(igb);
}
/*
+ * Setup the rx/tx rings
+ */
+ igb_setup_rings(igb);
+
+ /*
* Enable adapter interrupts
* The interrupts must be enabled after the driver state is START
*/
@@ -1731,7 +1704,7 @@ start_failure:
* igb_stop - Stop the driver/chipset
*/
void
-igb_stop(igb_t *igb)
+igb_stop(igb_t *igb, boolean_t free_buffer)
{
int i;
@@ -1771,6 +1744,19 @@ igb_stop(igb_t *igb)
if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK)
ddi_fm_service_impact(igb->dip, DDI_SERVICE_LOST);
+
+ if (igb->link_state == LINK_STATE_UP) {
+ igb->link_state = LINK_STATE_UNKNOWN;
+ mac_link_update(igb->mac_hdl, igb->link_state);
+ }
+
+ if (free_buffer) {
+ /*
+ * Release the DMA/memory resources of rx/tx rings
+ */
+ igb_free_dma(igb);
+ igb_free_rx_data(igb);
+ }
}
/*
@@ -1849,6 +1835,50 @@ igb_free_rings(igb_t *igb)
}
}
+static int
+igb_alloc_rx_data(igb_t *igb)
+{
+ igb_rx_ring_t *rx_ring;
+ int i;
+
+ for (i = 0; i < igb->num_rx_rings; i++) {
+ rx_ring = &igb->rx_rings[i];
+ if (igb_alloc_rx_ring_data(rx_ring) != IGB_SUCCESS)
+ goto alloc_rx_rings_failure;
+ }
+ return (IGB_SUCCESS);
+
+alloc_rx_rings_failure:
+ igb_free_rx_data(igb);
+ return (IGB_FAILURE);
+}
+
+static void
+igb_free_rx_data(igb_t *igb)
+{
+ igb_rx_ring_t *rx_ring;
+ igb_rx_data_t *rx_data;
+ int i;
+
+ for (i = 0; i < igb->num_rx_rings; i++) {
+ rx_ring = &igb->rx_rings[i];
+
+ mutex_enter(&igb->rx_pending_lock);
+ rx_data = rx_ring->rx_data;
+
+ if (rx_data != NULL) {
+ rx_data->flag |= IGB_RX_STOPPED;
+
+ if (rx_data->rcb_pending == 0) {
+ igb_free_rx_ring_data(rx_data);
+ rx_ring->rx_data = NULL;
+ }
+ }
+
+ mutex_exit(&igb->rx_pending_lock);
+ }
+}
+
/*
* igb_setup_rings - Setup rx/tx rings
*/
@@ -1871,6 +1901,7 @@ static void
igb_setup_rx_ring(igb_rx_ring_t *rx_ring)
{
igb_t *igb = rx_ring->igb;
+ igb_rx_data_t *rx_data = rx_ring->rx_data;
struct e1000_hw *hw = &igb->hw;
rx_control_block_t *rcb;
union e1000_adv_rx_desc *rbd;
@@ -1887,8 +1918,8 @@ igb_setup_rx_ring(igb_rx_ring_t *rx_ring)
* Initialize descriptor ring with buffer addresses
*/
for (i = 0; i < igb->rx_ring_size; i++) {
- rcb = rx_ring->work_list[i];
- rbd = &rx_ring->rbd_ring[i];
+ rcb = rx_data->work_list[i];
+ rbd = &rx_data->rbd_ring[i];
rbd->read.pkt_addr = rcb->rx_buf.dma_address;
rbd->read.hdr_addr = NULL;
@@ -1897,15 +1928,15 @@ igb_setup_rx_ring(igb_rx_ring_t *rx_ring)
/*
* Initialize the base address registers
*/
- buf_low = (uint32_t)rx_ring->rbd_area.dma_address;
- buf_high = (uint32_t)(rx_ring->rbd_area.dma_address >> 32);
+ buf_low = (uint32_t)rx_data->rbd_area.dma_address;
+ buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32);
E1000_WRITE_REG(hw, E1000_RDBAH(rx_ring->index), buf_high);
E1000_WRITE_REG(hw, E1000_RDBAL(rx_ring->index), buf_low);
/*
* Initialize the length register
*/
- size = rx_ring->ring_size * sizeof (union e1000_adv_rx_desc);
+ size = rx_data->ring_size * sizeof (union e1000_adv_rx_desc);
E1000_WRITE_REG(hw, E1000_RDLEN(rx_ring->index), size);
/*
@@ -1926,25 +1957,14 @@ igb_setup_rx_ring(igb_rx_ring_t *rx_ring)
rxdctl |= 1 << 16; /* wthresh */
E1000_WRITE_REG(hw, E1000_RXDCTL(rx_ring->index), rxdctl);
- rx_ring->rbd_next = 0;
-
- /*
- * Note: Considering the case that the chipset is being reset
- * and there are still some buffers held by the upper layer,
- * we should not reset the values of rcb_head, rcb_tail and
- * rcb_free;
- */
- if (igb->igb_state == IGB_UNKNOWN) {
- rx_ring->rcb_head = 0;
- rx_ring->rcb_tail = 0;
- rx_ring->rcb_free = rx_ring->free_list_size;
- }
+ rx_data->rbd_next = 0;
}
static void
igb_setup_rx(igb_t *igb)
{
igb_rx_ring_t *rx_ring;
+ igb_rx_data_t *rx_data;
igb_rx_group_t *rx_group;
struct e1000_hw *hw = &igb->hw;
uint32_t rctl, rxcsum;
@@ -2053,8 +2073,9 @@ igb_setup_rx(igb_t *igb)
*/
for (i = 0; i < igb->num_rx_rings; i++) {
rx_ring = &igb->rx_rings[i];
+ rx_data = rx_ring->rx_data;
E1000_WRITE_REG(hw, E1000_RDH(i), 0);
- E1000_WRITE_REG(hw, E1000_RDT(i), rx_ring->ring_size - 1);
+ E1000_WRITE_REG(hw, E1000_RDT(i), rx_data->ring_size - 1);
}
/*
@@ -2141,13 +2162,7 @@ igb_setup_tx_ring(igb_tx_ring_t *tx_ring)
tx_ring->tbd_tail = 0;
tx_ring->tbd_free = tx_ring->ring_size;
- /*
- * Note: for the case that the chipset is being reset, we should not
- * reset the values of tcb_head, tcb_tail. And considering there might
- * still be some packets kept in the pending_list, we should not assert
- * (tcb_free == free_list_size) here.
- */
- if (igb->igb_state == IGB_UNKNOWN) {
+ if (igb->tx_ring_init == B_TRUE) {
tx_ring->tcb_head = 0;
tx_ring->tcb_tail = 0;
tx_ring->tcb_free = tx_ring->free_list_size;
@@ -3632,6 +3647,7 @@ static boolean_t
igb_set_loopback_mode(igb_t *igb, uint32_t mode)
{
struct e1000_hw *hw;
+ int i;
if (mode == igb->loopback_mode)
return (B_TRUE);
@@ -3674,6 +3690,41 @@ igb_set_loopback_mode(igb_t *igb, uint32_t mode)
mutex_exit(&igb->gen_lock);
+ /*
+ * When external loopback is set, wait up to 1000ms to get the link up.
+ * According to test, 1000ms can work and it's an experimental value.
+ */
+ if (mode == IGB_LB_EXTERNAL) {
+ for (i = 0; i <= 10; i++) {
+ mutex_enter(&igb->gen_lock);
+ (void) igb_link_check(igb);
+ mutex_exit(&igb->gen_lock);
+
+ if (igb->link_state == LINK_STATE_UP)
+ break;
+
+ msec_delay(100);
+ }
+
+ if (igb->link_state != LINK_STATE_UP) {
+ /*
+ * Does not support external loopback.
+ * Reset driver to loopback none.
+ */
+ igb->loopback_mode = IGB_LB_NONE;
+
+ /* Reset the chip */
+ hw->phy.autoneg_wait_to_complete = B_TRUE;
+ (void) igb_reset(igb);
+ hw->phy.autoneg_wait_to_complete = B_FALSE;
+
+ IGB_DEBUGLOG_0(igb, "Set external loopback failed, "
+ "reset to loopback none.");
+
+ return (B_FALSE);
+ }
+ }
+
return (B_TRUE);
}
@@ -3847,12 +3898,14 @@ igb_intr_rx_work(igb_rx_ring_t *rx_ring)
static void
igb_intr_tx_work(igb_tx_ring_t *tx_ring)
{
+ igb_t *igb = tx_ring->igb;
+
/* Recycle the tx descriptors */
tx_ring->tx_recycle(tx_ring);
/* Schedule the re-transmit */
if (tx_ring->reschedule &&
- (tx_ring->tbd_free >= tx_ring->resched_thresh)) {
+ (tx_ring->tbd_free >= igb->tx_resched_thresh)) {
tx_ring->reschedule = B_FALSE;
mac_tx_ring_update(tx_ring->igb->mac_hdl, tx_ring->ring_handle);
IGB_DEBUG_STAT(tx_ring->stat_reschedule);
@@ -3950,7 +4003,7 @@ igb_intr_legacy(void *arg1, void *arg2)
/* Schedule the re-transmit */
tx_reschedule = (tx_ring->reschedule &&
- (tx_ring->tbd_free >= tx_ring->resched_thresh));
+ (tx_ring->tbd_free >= igb->tx_resched_thresh));
}
if (icr & E1000_ICR_LSC) {
@@ -4869,84 +4922,146 @@ igb_get_phy_state(igb_t *igb)
ASSERT(mutex_owned(&igb->gen_lock));
- (void) e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
- (void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
- (void) e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &phy_an_adv);
- (void) e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_an_exp);
- (void) e1000_read_phy_reg(hw, PHY_EXT_STATUS, &phy_ext_status);
- (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_1000t_ctrl);
- (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_1000t_status);
- (void) e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_lp_able);
-
- igb->param_autoneg_cap =
- (phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0;
- igb->param_pause_cap =
- (phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
- igb->param_asym_pause_cap =
- (phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
- igb->param_1000fdx_cap = ((phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
- (phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0;
- igb->param_1000hdx_cap = ((phy_ext_status & IEEE_ESR_1000T_HD_CAPS) ||
- (phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0;
- igb->param_100t4_cap =
- (phy_status & MII_SR_100T4_CAPS) ? 1 : 0;
- igb->param_100fdx_cap = ((phy_status & MII_SR_100X_FD_CAPS) ||
- (phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0;
- igb->param_100hdx_cap = ((phy_status & MII_SR_100X_HD_CAPS) ||
- (phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0;
- igb->param_10fdx_cap =
- (phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0;
- igb->param_10hdx_cap =
- (phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0;
- igb->param_rem_fault =
- (phy_status & MII_SR_REMOTE_FAULT) ? 1 : 0;
-
- igb->param_adv_autoneg_cap = hw->mac.autoneg;
- igb->param_adv_pause_cap =
- (phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
- igb->param_adv_asym_pause_cap =
- (phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
- igb->param_adv_1000hdx_cap =
- (phy_1000t_ctrl & CR_1000T_HD_CAPS) ? 1 : 0;
- igb->param_adv_100t4_cap =
- (phy_an_adv & NWAY_AR_100T4_CAPS) ? 1 : 0;
- igb->param_adv_rem_fault =
- (phy_an_adv & NWAY_AR_REMOTE_FAULT) ? 1 : 0;
- if (igb->param_adv_autoneg_cap == 1) {
- igb->param_adv_1000fdx_cap =
- (phy_1000t_ctrl & CR_1000T_FD_CAPS) ? 1 : 0;
- igb->param_adv_100fdx_cap =
- (phy_an_adv & NWAY_AR_100TX_FD_CAPS) ? 1 : 0;
- igb->param_adv_100hdx_cap =
- (phy_an_adv & NWAY_AR_100TX_HD_CAPS) ? 1 : 0;
- igb->param_adv_10fdx_cap =
- (phy_an_adv & NWAY_AR_10T_FD_CAPS) ? 1 : 0;
- igb->param_adv_10hdx_cap =
- (phy_an_adv & NWAY_AR_10T_HD_CAPS) ? 1 : 0;
- }
-
- igb->param_lp_autoneg_cap =
- (phy_an_exp & NWAY_ER_LP_NWAY_CAPS) ? 1 : 0;
- igb->param_lp_pause_cap =
- (phy_lp_able & NWAY_LPAR_PAUSE) ? 1 : 0;
- igb->param_lp_asym_pause_cap =
- (phy_lp_able & NWAY_LPAR_ASM_DIR) ? 1 : 0;
- igb->param_lp_1000fdx_cap =
- (phy_1000t_status & SR_1000T_LP_FD_CAPS) ? 1 : 0;
- igb->param_lp_1000hdx_cap =
- (phy_1000t_status & SR_1000T_LP_HD_CAPS) ? 1 : 0;
- igb->param_lp_100t4_cap =
- (phy_lp_able & NWAY_LPAR_100T4_CAPS) ? 1 : 0;
- igb->param_lp_100fdx_cap =
- (phy_lp_able & NWAY_LPAR_100TX_FD_CAPS) ? 1 : 0;
- igb->param_lp_100hdx_cap =
- (phy_lp_able & NWAY_LPAR_100TX_HD_CAPS) ? 1 : 0;
- igb->param_lp_10fdx_cap =
- (phy_lp_able & NWAY_LPAR_10T_FD_CAPS) ? 1 : 0;
- igb->param_lp_10hdx_cap =
- (phy_lp_able & NWAY_LPAR_10T_HD_CAPS) ? 1 : 0;
- igb->param_lp_rem_fault =
- (phy_lp_able & NWAY_LPAR_REMOTE_FAULT) ? 1 : 0;
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ (void) e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
+ (void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
+ (void) e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &phy_an_adv);
+ (void) e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_an_exp);
+ (void) e1000_read_phy_reg(hw, PHY_EXT_STATUS, &phy_ext_status);
+ (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_1000t_ctrl);
+ (void) e1000_read_phy_reg(hw,
+ PHY_1000T_STATUS, &phy_1000t_status);
+ (void) e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_lp_able);
+
+ igb->param_autoneg_cap =
+ (phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0;
+ igb->param_pause_cap =
+ (phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
+ igb->param_asym_pause_cap =
+ (phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
+ igb->param_1000fdx_cap =
+ ((phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
+ (phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0;
+ igb->param_1000hdx_cap =
+ ((phy_ext_status & IEEE_ESR_1000T_HD_CAPS) ||
+ (phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0;
+ igb->param_100t4_cap =
+ (phy_status & MII_SR_100T4_CAPS) ? 1 : 0;
+ igb->param_100fdx_cap = ((phy_status & MII_SR_100X_FD_CAPS) ||
+ (phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0;
+ igb->param_100hdx_cap = ((phy_status & MII_SR_100X_HD_CAPS) ||
+ (phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0;
+ igb->param_10fdx_cap =
+ (phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0;
+ igb->param_10hdx_cap =
+ (phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0;
+ igb->param_rem_fault =
+ (phy_status & MII_SR_REMOTE_FAULT) ? 1 : 0;
+
+ igb->param_adv_autoneg_cap = hw->mac.autoneg;
+ igb->param_adv_pause_cap =
+ (phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
+ igb->param_adv_asym_pause_cap =
+ (phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
+ igb->param_adv_1000hdx_cap =
+ (phy_1000t_ctrl & CR_1000T_HD_CAPS) ? 1 : 0;
+ igb->param_adv_100t4_cap =
+ (phy_an_adv & NWAY_AR_100T4_CAPS) ? 1 : 0;
+ igb->param_adv_rem_fault =
+ (phy_an_adv & NWAY_AR_REMOTE_FAULT) ? 1 : 0;
+ if (igb->param_adv_autoneg_cap == 1) {
+ igb->param_adv_1000fdx_cap =
+ (phy_1000t_ctrl & CR_1000T_FD_CAPS) ? 1 : 0;
+ igb->param_adv_100fdx_cap =
+ (phy_an_adv & NWAY_AR_100TX_FD_CAPS) ? 1 : 0;
+ igb->param_adv_100hdx_cap =
+ (phy_an_adv & NWAY_AR_100TX_HD_CAPS) ? 1 : 0;
+ igb->param_adv_10fdx_cap =
+ (phy_an_adv & NWAY_AR_10T_FD_CAPS) ? 1 : 0;
+ igb->param_adv_10hdx_cap =
+ (phy_an_adv & NWAY_AR_10T_HD_CAPS) ? 1 : 0;
+ }
+
+ igb->param_lp_autoneg_cap =
+ (phy_an_exp & NWAY_ER_LP_NWAY_CAPS) ? 1 : 0;
+ igb->param_lp_pause_cap =
+ (phy_lp_able & NWAY_LPAR_PAUSE) ? 1 : 0;
+ igb->param_lp_asym_pause_cap =
+ (phy_lp_able & NWAY_LPAR_ASM_DIR) ? 1 : 0;
+ igb->param_lp_1000fdx_cap =
+ (phy_1000t_status & SR_1000T_LP_FD_CAPS) ? 1 : 0;
+ igb->param_lp_1000hdx_cap =
+ (phy_1000t_status & SR_1000T_LP_HD_CAPS) ? 1 : 0;
+ igb->param_lp_100t4_cap =
+ (phy_lp_able & NWAY_LPAR_100T4_CAPS) ? 1 : 0;
+ igb->param_lp_100fdx_cap =
+ (phy_lp_able & NWAY_LPAR_100TX_FD_CAPS) ? 1 : 0;
+ igb->param_lp_100hdx_cap =
+ (phy_lp_able & NWAY_LPAR_100TX_HD_CAPS) ? 1 : 0;
+ igb->param_lp_10fdx_cap =
+ (phy_lp_able & NWAY_LPAR_10T_FD_CAPS) ? 1 : 0;
+ igb->param_lp_10hdx_cap =
+ (phy_lp_able & NWAY_LPAR_10T_HD_CAPS) ? 1 : 0;
+ igb->param_lp_rem_fault =
+ (phy_lp_able & NWAY_LPAR_REMOTE_FAULT) ? 1 : 0;
+ } else {
+ /*
+ * 1Gig Fiber adapter only offers 1Gig Full Duplex.
+ */
+ igb->param_autoneg_cap = 0;
+ igb->param_pause_cap = 1;
+ igb->param_asym_pause_cap = 1;
+ igb->param_1000fdx_cap = 1;
+ igb->param_1000hdx_cap = 0;
+ igb->param_100t4_cap = 0;
+ igb->param_100fdx_cap = 0;
+ igb->param_100hdx_cap = 0;
+ igb->param_10fdx_cap = 0;
+ igb->param_10hdx_cap = 0;
+
+ igb->param_adv_autoneg_cap = 0;
+ igb->param_adv_pause_cap = 1;
+ igb->param_adv_asym_pause_cap = 1;
+ igb->param_adv_1000fdx_cap = 1;
+ igb->param_adv_1000hdx_cap = 0;
+ igb->param_adv_100t4_cap = 0;
+ igb->param_adv_100fdx_cap = 0;
+ igb->param_adv_100hdx_cap = 0;
+ igb->param_adv_10fdx_cap = 0;
+ igb->param_adv_10hdx_cap = 0;
+
+ igb->param_lp_autoneg_cap = 0;
+ igb->param_lp_pause_cap = 0;
+ igb->param_lp_asym_pause_cap = 0;
+ igb->param_lp_1000fdx_cap = 0;
+ igb->param_lp_1000hdx_cap = 0;
+ igb->param_lp_100t4_cap = 0;
+ igb->param_lp_100fdx_cap = 0;
+ igb->param_lp_100hdx_cap = 0;
+ igb->param_lp_10fdx_cap = 0;
+ igb->param_lp_10hdx_cap = 0;
+ igb->param_lp_rem_fault = 0;
+ }
+}
+
+/*
+ * synchronize the adv* and en* parameters.
+ *
+ * See comments in <sys/dld.h> for details of the *_en_*
+ * parameters. The usage of ndd for setting adv parameters will
+ * synchronize all the en parameters with the igb parameters,
+ * implicitly disabling any settings made via dladm.
+ */
+static void
+igb_param_sync(igb_t *igb)
+{
+ igb->param_en_1000fdx_cap = igb->param_adv_1000fdx_cap;
+ igb->param_en_1000hdx_cap = igb->param_adv_1000hdx_cap;
+ igb->param_en_100t4_cap = igb->param_adv_100t4_cap;
+ igb->param_en_100fdx_cap = igb->param_adv_100fdx_cap;
+ igb->param_en_100hdx_cap = igb->param_adv_100hdx_cap;
+ igb->param_en_10fdx_cap = igb->param_adv_10fdx_cap;
+ igb->param_en_10hdx_cap = igb->param_adv_10hdx_cap;
}
/*
diff --git a/usr/src/uts/common/io/igb/igb_ndd.c b/usr/src/uts/common/io/igb/igb_ndd.c
deleted file mode 100644
index 3e00a82c89..0000000000
--- a/usr/src/uts/common/io/igb/igb_ndd.c
+++ /dev/null
@@ -1,374 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * Copyright(c) 2007-2009 Intel Corporation. All rights reserved.
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at:
- * http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When using or redistributing this file, you may do so under the
- * License only. No other modification of this header is permitted.
- *
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms of the CDDL.
- */
-
-#include "igb_sw.h"
-
-/* Function prototypes */
-static int igb_nd_get(queue_t *, mblk_t *, caddr_t, cred_t *);
-static int igb_nd_set(queue_t *, mblk_t *, char *, caddr_t, cred_t *);
-static int igb_nd_param_load(igb_t *);
-static void igb_nd_get_param_val(nd_param_t *);
-static int igb_nd_set_param_val(nd_param_t *, uint32_t);
-
-/*
- * Notes:
- * The first character of the <name> field encodes the read/write
- * status of the parameter:
- * '-' => read-only
- * '+' => read/write,
- * '?' => read/write on copper, read-only on serdes
- * '!' => invisible!
- *
- * For writable parameters, we check for a driver property with the
- * same name; if found, and its value is in range, we initialise
- * the parameter from the property, overriding the default in the
- * table below.
- *
- * A NULL in the <name> field terminates the array.
- *
- * The <info> field is used here to provide the index of the
- * parameter to be initialised; thus it doesn't matter whether
- * this table is kept ordered or not.
- *
- * The <info> field in the per-instance copy, on the other hand,
- * is used to count assignments so that we can tell when a magic
- * parameter has been set via ndd (see igb_nd_set()).
- */
-static const nd_param_t nd_template[] = {
-/* igb info min max init r/w+name */
-
-/* Our hardware capabilities */
-{ NULL, PARAM_AUTONEG_CAP, 0, 1, 1, "-autoneg_cap" },
-{ NULL, PARAM_PAUSE_CAP, 0, 1, 1, "-pause_cap" },
-{ NULL, PARAM_ASYM_PAUSE_CAP, 0, 1, 1, "-asym_pause_cap" },
-{ NULL, PARAM_1000FDX_CAP, 0, 1, 1, "-1000fdx_cap" },
-{ NULL, PARAM_1000HDX_CAP, 0, 1, 1, "-1000hdx_cap" },
-{ NULL, PARAM_100T4_CAP, 0, 1, 0, "-100T4_cap" },
-{ NULL, PARAM_100FDX_CAP, 0, 1, 1, "-100fdx_cap" },
-{ NULL, PARAM_100HDX_CAP, 0, 1, 1, "-100hdx_cap" },
-{ NULL, PARAM_10FDX_CAP, 0, 1, 1, "-10fdx_cap" },
-{ NULL, PARAM_10HDX_CAP, 0, 1, 1, "-10hdx_cap" },
-{ NULL, PARAM_REM_FAULT, 0, 1, 0, "-rem_fault" },
-
-/* Our advertised capabilities */
-{ NULL, PARAM_ADV_AUTONEG_CAP, 0, 1, 1, "?adv_autoneg_cap" },
-{ NULL, PARAM_ADV_PAUSE_CAP, 0, 1, 1, "-adv_pause_cap" },
-{ NULL, PARAM_ADV_ASYM_PAUSE_CAP, 0, 1, 1, "-adv_asym_pause_cap" },
-{ NULL, PARAM_ADV_1000FDX_CAP, 0, 1, 1, "?adv_1000fdx_cap" },
-{ NULL, PARAM_ADV_1000HDX_CAP, 0, 1, 1, "-adv_1000hdx_cap" },
-{ NULL, PARAM_ADV_100T4_CAP, 0, 1, 0, "-adv_100T4_cap" },
-{ NULL, PARAM_ADV_100FDX_CAP, 0, 1, 1, "?adv_100fdx_cap" },
-{ NULL, PARAM_ADV_100HDX_CAP, 0, 1, 1, "?adv_100hdx_cap" },
-{ NULL, PARAM_ADV_10FDX_CAP, 0, 1, 1, "?adv_10fdx_cap" },
-{ NULL, PARAM_ADV_10HDX_CAP, 0, 1, 1, "?adv_10hdx_cap" },
-{ NULL, PARAM_ADV_REM_FAULT, 0, 1, 0, "-adv_rem_fault" },
-
-/* Partner's advertised capabilities */
-{ NULL, PARAM_LP_AUTONEG_CAP, 0, 1, 0, "-lp_autoneg_cap" },
-{ NULL, PARAM_LP_PAUSE_CAP, 0, 1, 0, "-lp_pause_cap" },
-{ NULL, PARAM_LP_ASYM_PAUSE_CAP, 0, 1, 0, "-lp_asym_pause_cap" },
-{ NULL, PARAM_LP_1000FDX_CAP, 0, 1, 0, "-lp_1000fdx_cap" },
-{ NULL, PARAM_LP_1000HDX_CAP, 0, 1, 0, "-lp_1000hdx_cap" },
-{ NULL, PARAM_LP_100T4_CAP, 0, 1, 0, "-lp_100T4_cap" },
-{ NULL, PARAM_LP_100FDX_CAP, 0, 1, 0, "-lp_100fdx_cap" },
-{ NULL, PARAM_LP_100HDX_CAP, 0, 1, 0, "-lp_100hdx_cap" },
-{ NULL, PARAM_LP_10FDX_CAP, 0, 1, 0, "-lp_10fdx_cap" },
-{ NULL, PARAM_LP_10HDX_CAP, 0, 1, 0, "-lp_10hdx_cap" },
-{ NULL, PARAM_LP_REM_FAULT, 0, 1, 0, "-lp_rem_fault" },
-
-/* Current operating modes */
-{ NULL, PARAM_LINK_STATUS, 0, 1, 0, "-link_status" },
-{ NULL, PARAM_LINK_SPEED, 0, 1000, 0, "-link_speed" },
-{ NULL, PARAM_LINK_DUPLEX, 0, 2, 0, "-link_duplex" },
-
-/* Terminator */
-{ NULL, PARAM_COUNT, 0, 0, 0, NULL }
-};
-
-
-/*
- * igb_nd_get - ndd get parameter values
- */
-static int
-igb_nd_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
-{
- nd_param_t *nd = (nd_param_t *)(uintptr_t)cp;
- _NOTE(ARGUNUSED(q));
- _NOTE(ARGUNUSED(credp));
-
- igb_nd_get_param_val(nd);
- (void) mi_mpprintf(mp, "%d", nd->val);
-
- return (0);
-}
-
-/*
- * igb_nd_set - ndd set parameter values
- */
-static int
-igb_nd_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *credp)
-{
- nd_param_t *nd = (nd_param_t *)(uintptr_t)cp;
- long new_value;
- char *end;
- _NOTE(ARGUNUSED(q));
- _NOTE(ARGUNUSED(mp));
- _NOTE(ARGUNUSED(credp));
-
- new_value = mi_strtol(value, &end, 10);
- if (end == value)
- return (EINVAL);
- if (new_value < nd->min || new_value > nd->max)
- return (EINVAL);
-
- if (igb_nd_set_param_val(nd, new_value) != IGB_SUCCESS)
- return (EIO);
-
- return (0);
-}
-
-/*
- * igb_nd_param_load
- */
-static int
-igb_nd_param_load(igb_t *igb)
-{
- const nd_param_t *tmpnd;
- nd_param_t *nd;
- caddr_t *ndd;
- pfi_t setfn;
- char *nm;
- int value;
-
- ndd = &igb->nd_data;
- ASSERT(*ndd == NULL);
-
- for (tmpnd = nd_template; tmpnd->name != NULL; ++tmpnd) {
- /*
- * Copy the template from nd_template[] into the
- * proper slot in the per-instance parameters,
- * then register the parameter with nd_load()
- */
- nd = &igb->nd_params[tmpnd->info];
- *nd = *tmpnd;
- nd->private = igb;
- igb_nd_get_param_val(nd);
-
- nm = &nd->name[0];
- setfn = igb_nd_set;
-
- if (igb->hw.phy.media_type != e1000_media_type_copper) {
- switch (*nm) {
- default:
- break;
-
- case '?':
- setfn = NULL;
- break;
- }
- }
-
- switch (*nm) {
- default:
- case '!':
- continue;
-
- case '+':
- case '?':
- break;
-
- case '-':
- setfn = NULL;
- break;
- }
-
- if (!nd_load(ndd, ++nm, igb_nd_get, setfn, (caddr_t)nd))
- goto nd_fail;
-
- /*
- * If the parameter is writable, and there's a property
- * with the same name, and its value is in range, we use
- * it to initialise the parameter. If it exists but is
- * out of range, it's ignored.
- */
- if (setfn && IGB_PROP_EXISTS(igb->dip, nm)) {
- value = IGB_PROP_GET_INT(igb->dip, nm);
- if (value >= nd->min && value <= nd->max)
- nd->val = value;
- }
- }
-
- return (IGB_SUCCESS);
-
-nd_fail:
- igb_log(igb,
- "igb_nd_param_load: failed at index %d [info %d]",
- (tmpnd - nd_template), tmpnd->info);
- nd_free(ndd);
- return (IGB_FAILURE);
-}
-
-
-/*
- * igb_nd_get_param_val
- */
-static void
-igb_nd_get_param_val(nd_param_t *nd)
-{
- igb_t *igb = (igb_t *)nd->private;
-
- mutex_enter(&igb->gen_lock);
-
- switch (nd->info) {
- case PARAM_LINK_STATUS:
- nd->val = (igb->link_state == LINK_STATE_UP) ? 1 : 0;
- break;
- case PARAM_LINK_SPEED:
- nd->val = igb->link_speed;
- break;
- case PARAM_LINK_DUPLEX:
- nd->val = igb->link_duplex;
- break;
- default:
- break;
- }
-
- mutex_exit(&igb->gen_lock);
-}
-
-/*
- * igb_nd_set_param_val
- */
-static int
-igb_nd_set_param_val(nd_param_t *nd, uint32_t value)
-{
- igb_t *igb = (igb_t *)nd->private;
- int result = IGB_SUCCESS;
-
- mutex_enter(&igb->gen_lock);
-
- if (nd->val == value) {
- mutex_exit(&igb->gen_lock);
- return (IGB_SUCCESS);
- }
-
- switch (nd->info) {
- case PARAM_ADV_AUTONEG_CAP:
- case PARAM_ADV_1000FDX_CAP:
- case PARAM_ADV_100FDX_CAP:
- case PARAM_ADV_100HDX_CAP:
- case PARAM_ADV_10FDX_CAP:
- case PARAM_ADV_10HDX_CAP:
- nd->val = value;
- (void) igb_setup_link(igb, B_TRUE);
- if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
- ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
- result = IGB_FAILURE;
- }
- break;
-
- default:
- break;
- }
-
- mutex_exit(&igb->gen_lock);
-
- return (result);
-}
-
-/*
- * comment describing function
- */
-int
-igb_nd_init(igb_t *igb)
-{
- /*
- * Register all the per-instance properties, initialising
- * them from the table above or from driver properties set
- * in the .conf file
- */
- if (igb_nd_param_load(igb) != IGB_SUCCESS)
- return (IGB_FAILURE);
-
- return (IGB_SUCCESS);
-}
-
-
-/*
- * Free the Named Dispatch Table by calling nd_free
- */
-void
-igb_nd_cleanup(igb_t *igb)
-{
- nd_free(&igb->nd_data);
-}
-
-/*
- * comment describing function
- */
-enum ioc_reply
-igb_nd_ioctl(igb_t *igb, queue_t *q,
- mblk_t *mp, struct iocblk *ioc)
-{
- boolean_t ok;
- int cmd;
-
- cmd = ioc->ioc_cmd;
- switch (cmd) {
- default:
- /* NOTREACHED */
- ASSERT(B_FALSE);
- return (IOC_INVAL);
-
- case ND_GET:
- /*
- * If nd_getset() returns B_FALSE, the command was
- * not valid (e.g. unknown name), so we just tell the
- * top-level ioctl code to send a NAK (with code EINVAL).
- *
- * Otherwise, nd_getset() will have built the reply to
- * be sent (but not actually sent it), so we tell the
- * caller to send the prepared reply.
- */
- ok = nd_getset(q, igb->nd_data, mp);
- return (ok ? IOC_REPLY : IOC_INVAL);
-
- case ND_SET:
- /*
- * All adv_* parameters are locked (read-only) while
- * the device is in any sort of loopback mode ...
- */
- if (igb->loopback_mode != IGB_LB_NONE) {
- ioc->ioc_error = EBUSY;
- return (IOC_INVAL);
- }
-
- ok = nd_getset(q, igb->nd_data, mp);
- return (ok ? IOC_REPLY : IOC_INVAL);
- }
-}
diff --git a/usr/src/uts/common/io/igb/igb_rx.c b/usr/src/uts/common/io/igb/igb_rx.c
index 13265e8a28..1eeaf9d325 100644
--- a/usr/src/uts/common/io/igb/igb_rx.c
+++ b/usr/src/uts/common/io/igb/igb_rx.c
@@ -22,15 +22,15 @@
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms of the CDDL.
*/
#include "igb_sw.h"
/* function prototypes */
-static mblk_t *igb_rx_bind(igb_rx_ring_t *, uint32_t, uint32_t);
-static mblk_t *igb_rx_copy(igb_rx_ring_t *, uint32_t, uint32_t);
+static mblk_t *igb_rx_bind(igb_rx_data_t *, uint32_t, uint32_t);
+static mblk_t *igb_rx_copy(igb_rx_data_t *, uint32_t, uint32_t);
static void igb_rx_assoc_hcksum(mblk_t *, uint32_t);
#ifndef IGB_DEBUG
@@ -48,17 +48,25 @@ static void igb_rx_assoc_hcksum(mblk_t *, uint32_t);
void
igb_rx_recycle(caddr_t arg)
{
+ igb_t *igb;
igb_rx_ring_t *rx_ring;
+ igb_rx_data_t *rx_data;
rx_control_block_t *recycle_rcb;
uint32_t free_index;
+ uint32_t ref_cnt;
recycle_rcb = (rx_control_block_t *)(uintptr_t)arg;
- rx_ring = recycle_rcb->rx_ring;
+ rx_data = recycle_rcb->rx_data;
+ rx_ring = rx_data->rx_ring;
+ igb = rx_ring->igb;
- if (recycle_rcb->state == RCB_FREE)
+ if (recycle_rcb->ref_cnt == 0) {
+ /*
+ * This case only happens when rx buffers are being freed
+ * in igb_stop() and freemsg() is called.
+ */
return;
-
- recycle_rcb->state = RCB_FREE;
+ }
ASSERT(recycle_rcb->mp == NULL);
@@ -73,23 +81,52 @@ igb_rx_recycle(caddr_t arg)
/*
* Put the recycled rx control block into free list
*/
- mutex_enter(&rx_ring->recycle_lock);
+ mutex_enter(&rx_data->recycle_lock);
- free_index = rx_ring->rcb_tail;
- ASSERT(rx_ring->free_list[free_index] == NULL);
+ free_index = rx_data->rcb_tail;
+ ASSERT(rx_data->free_list[free_index] == NULL);
- rx_ring->free_list[free_index] = recycle_rcb;
- rx_ring->rcb_tail = NEXT_INDEX(free_index, 1, rx_ring->free_list_size);
+ rx_data->free_list[free_index] = recycle_rcb;
+ rx_data->rcb_tail = NEXT_INDEX(free_index, 1, rx_data->free_list_size);
- mutex_exit(&rx_ring->recycle_lock);
+ mutex_exit(&rx_data->recycle_lock);
/*
* The atomic operation on the number of the available rx control
* blocks in the free list is used to make the recycling mutual
* exclusive with the receiving.
*/
- atomic_inc_32(&rx_ring->rcb_free);
- ASSERT(rx_ring->rcb_free <= rx_ring->free_list_size);
+ atomic_inc_32(&rx_data->rcb_free);
+ ASSERT(rx_data->rcb_free <= rx_data->free_list_size);
+
+ /*
+ * Considering the case that the interface is unplumbed
+ * and there are still some buffers held by the upper layer.
+ * When the buffer is returned back, we need to free it.
+ */
+ ref_cnt = atomic_dec_32_nv(&recycle_rcb->ref_cnt);
+ if (ref_cnt == 0) {
+ if (recycle_rcb->mp != NULL) {
+ freemsg(recycle_rcb->mp);
+ recycle_rcb->mp = NULL;
+ }
+
+ igb_free_dma_buffer(&recycle_rcb->rx_buf);
+
+ mutex_enter(&igb->rx_pending_lock);
+ atomic_dec_32(&rx_data->rcb_pending);
+ atomic_dec_32(&igb->rcb_pending);
+
+ /*
+	 * Once no buffers belonging to this rx_data are still
+	 * held by the upper layer, the rx_data can be freed.
+ */
+ if ((rx_data->flag & IGB_RX_STOPPED) &&
+ (rx_data->rcb_pending == 0))
+ igb_free_rx_ring_data(rx_data);
+
+ mutex_exit(&igb->rx_pending_lock);
+ }
}
/*
@@ -99,13 +136,13 @@ igb_rx_recycle(caddr_t arg)
* and send the copied packet upstream
*/
static mblk_t *
-igb_rx_copy(igb_rx_ring_t *rx_ring, uint32_t index, uint32_t pkt_len)
+igb_rx_copy(igb_rx_data_t *rx_data, uint32_t index, uint32_t pkt_len)
{
rx_control_block_t *current_rcb;
mblk_t *mp;
- igb_t *igb = rx_ring->igb;
+ igb_t *igb = rx_data->rx_ring->igb;
- current_rcb = rx_ring->work_list[index];
+ current_rcb = rx_data->work_list[index];
DMA_SYNC(&current_rcb->rx_buf, DDI_DMA_SYNC_FORKERNEL);
@@ -121,7 +158,7 @@ igb_rx_copy(igb_rx_ring_t *rx_ring, uint32_t index, uint32_t pkt_len)
*/
mp = allocb(pkt_len + IPHDR_ALIGN_ROOM, 0);
if (mp == NULL) {
- igb_log(rx_ring->igb, "igb_rx_copy: allocate buffer failed");
+ igb_log(igb, "igb_rx_copy: allocate buffer failed");
return (NULL);
}
@@ -142,23 +179,23 @@ igb_rx_copy(igb_rx_ring_t *rx_ring, uint32_t index, uint32_t pkt_len)
* and build mblk that will be sent upstream.
*/
static mblk_t *
-igb_rx_bind(igb_rx_ring_t *rx_ring, uint32_t index, uint32_t pkt_len)
+igb_rx_bind(igb_rx_data_t *rx_data, uint32_t index, uint32_t pkt_len)
{
rx_control_block_t *current_rcb;
rx_control_block_t *free_rcb;
uint32_t free_index;
mblk_t *mp;
- igb_t *igb = rx_ring->igb;
+ igb_t *igb = rx_data->rx_ring->igb;
/*
* If the free list is empty, we cannot proceed to send
* the current DMA buffer upstream. We'll have to return
* and use bcopy to process the packet.
*/
- if (igb_atomic_reserve(&rx_ring->rcb_free, 1) < 0)
+ if (igb_atomic_reserve(&rx_data->rcb_free, 1) < 0)
return (NULL);
- current_rcb = rx_ring->work_list[index];
+ current_rcb = rx_data->work_list[index];
/*
* If the mp of the rx control block is NULL, try to do
* desballoc again.
@@ -174,7 +211,7 @@ igb_rx_bind(igb_rx_ring_t *rx_ring, uint32_t index, uint32_t pkt_len)
* process the packet.
*/
if (current_rcb->mp == NULL) {
- atomic_inc_32(&rx_ring->rcb_free);
+ atomic_inc_32(&rx_data->rcb_free);
return (NULL);
}
}
@@ -187,13 +224,13 @@ igb_rx_bind(igb_rx_ring_t *rx_ring, uint32_t index, uint32_t pkt_len)
current_rcb->rx_buf.dma_handle) != DDI_FM_OK) {
ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
atomic_or_32(&igb->igb_state, IGB_ERROR);
- atomic_inc_32(&rx_ring->rcb_free);
+ atomic_inc_32(&rx_data->rcb_free);
return (NULL);
}
mp = current_rcb->mp;
current_rcb->mp = NULL;
- current_rcb->state = RCB_SENDUP;
+ atomic_inc_32(&current_rcb->ref_cnt);
mp->b_wptr = mp->b_rptr + pkt_len;
mp->b_next = mp->b_cont = NULL;
@@ -201,16 +238,16 @@ igb_rx_bind(igb_rx_ring_t *rx_ring, uint32_t index, uint32_t pkt_len)
/*
* Strip off one free rx control block from the free list
*/
- free_index = rx_ring->rcb_head;
- free_rcb = rx_ring->free_list[free_index];
+ free_index = rx_data->rcb_head;
+ free_rcb = rx_data->free_list[free_index];
ASSERT(free_rcb != NULL);
- rx_ring->free_list[free_index] = NULL;
- rx_ring->rcb_head = NEXT_INDEX(free_index, 1, rx_ring->free_list_size);
+ rx_data->free_list[free_index] = NULL;
+ rx_data->rcb_head = NEXT_INDEX(free_index, 1, rx_data->free_list_size);
/*
* Put the rx control block to the work list
*/
- rx_ring->work_list[index] = free_rcb;
+ rx_data->work_list[index] = free_rcb;
return (mp);
}
@@ -292,6 +329,7 @@ igb_rx(igb_rx_ring_t *rx_ring, int poll_bytes)
uint32_t pkt_num;
uint32_t total_bytes;
igb_t *igb = rx_ring->igb;
+ igb_rx_data_t *rx_data = rx_ring->rx_data;
mblk_head = NULL;
mblk_tail = &mblk_head;
@@ -303,10 +341,10 @@ igb_rx(igb_rx_ring_t *rx_ring, int poll_bytes)
* Sync the receive descriptors before
* accepting the packets
*/
- DMA_SYNC(&rx_ring->rbd_area, DDI_DMA_SYNC_FORKERNEL);
+ DMA_SYNC(&rx_data->rbd_area, DDI_DMA_SYNC_FORKERNEL);
if (igb_check_dma_handle(
- rx_ring->rbd_area.dma_handle) != DDI_FM_OK) {
+ rx_data->rbd_area.dma_handle) != DDI_FM_OK) {
ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
atomic_or_32(&igb->igb_state, IGB_ERROR);
return (NULL);
@@ -316,9 +354,9 @@ igb_rx(igb_rx_ring_t *rx_ring, int poll_bytes)
* Get the start point of rx bd ring which should be examined
* during this cycle.
*/
- rx_next = rx_ring->rbd_next;
+ rx_next = rx_data->rbd_next;
- current_rbd = &rx_ring->rbd_ring[rx_next];
+ current_rbd = &rx_data->rbd_ring[rx_next];
pkt_num = 0;
total_bytes = 0;
status_error = current_rbd->wb.upper.status_error;
@@ -358,11 +396,11 @@ igb_rx(igb_rx_ring_t *rx_ring, int poll_bytes)
* than the copy threshold, we'll allocate a new mblk and
* copy the packet data to the mblk.
*/
- if (pkt_len > rx_ring->copy_thresh)
- mp = igb_rx_bind(rx_ring, rx_next, pkt_len);
+ if (pkt_len > igb->rx_copy_thresh)
+ mp = igb_rx_bind(rx_data, rx_next, pkt_len);
if (mp == NULL)
- mp = igb_rx_copy(rx_ring, rx_next, pkt_len);
+ mp = igb_rx_copy(rx_data, rx_next, pkt_len);
if (mp != NULL) {
/*
@@ -379,34 +417,34 @@ rx_discard:
/*
* Reset rx descriptor read bits
*/
- current_rcb = rx_ring->work_list[rx_next];
+ current_rcb = rx_data->work_list[rx_next];
current_rbd->read.pkt_addr = current_rcb->rx_buf.dma_address;
current_rbd->read.hdr_addr = 0;
- rx_next = NEXT_INDEX(rx_next, 1, rx_ring->ring_size);
+ rx_next = NEXT_INDEX(rx_next, 1, rx_data->ring_size);
/*
* The receive function is in interrupt context, so here
- * limit_per_intr is used to avoid doing receiving too long
+	 * rx_limit_per_intr is used to avoid spending too much time receiving
* per interrupt.
*/
- if (++pkt_num > rx_ring->limit_per_intr) {
+ if (++pkt_num > igb->rx_limit_per_intr) {
IGB_DEBUG_STAT(rx_ring->stat_exceed_pkt);
break;
}
- current_rbd = &rx_ring->rbd_ring[rx_next];
+ current_rbd = &rx_data->rbd_ring[rx_next];
status_error = current_rbd->wb.upper.status_error;
}
- DMA_SYNC(&rx_ring->rbd_area, DDI_DMA_SYNC_FORDEV);
+ DMA_SYNC(&rx_data->rbd_area, DDI_DMA_SYNC_FORDEV);
- rx_ring->rbd_next = rx_next;
+ rx_data->rbd_next = rx_next;
/*
* Update the h/w tail accordingly
*/
- rx_tail = PREV_INDEX(rx_next, 1, rx_ring->ring_size);
+ rx_tail = PREV_INDEX(rx_next, 1, rx_data->ring_size);
E1000_WRITE_REG(&igb->hw, E1000_RDT(rx_ring->index), rx_tail);
diff --git a/usr/src/uts/common/io/igb/igb_sw.h b/usr/src/uts/common/io/igb/igb_sw.h
index 483f3342b2..0d1f62d481 100644
--- a/usr/src/uts/common/io/igb/igb_sw.h
+++ b/usr/src/uts/common/io/igb/igb_sw.h
@@ -22,7 +22,7 @@
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -84,6 +84,8 @@ extern "C" {
#define IGB_STALL 0x08
#define IGB_ERROR 0x80
+#define IGB_RX_STOPPED 0x1
+
#define IGB_INTR_NONE 0
#define IGB_INTR_MSIX 1
#define IGB_INTR_MSI 2
@@ -182,9 +184,7 @@ extern "C" {
#define ATTACH_PROGRESS_ADD_INTR 0x0020 /* Intr handlers added */
#define ATTACH_PROGRESS_LOCKS 0x0040 /* Locks initialized */
#define ATTACH_PROGRESS_INIT_ADAPTER 0x0080 /* Adapter initialized */
-#define ATTACH_PROGRESS_ALLOC_DMA 0x0100 /* DMA resources allocated */
#define ATTACH_PROGRESS_STATS 0x0200 /* Kstats created */
-#define ATTACH_PROGRESS_NDD 0x0400 /* NDD initialized */
#define ATTACH_PROGRESS_MAC 0x0800 /* MAC registered */
#define ATTACH_PROGRESS_ENABLE_INTR 0x1000 /* DDI interrupts enabled */
#define ATTACH_PROGRESS_FMINIT 0x2000 /* FMA initialized */
@@ -223,45 +223,6 @@ extern "C" {
#define IGB_LB_INTERNAL_PHY 3
#define IGB_LB_INTERNAL_SERDES 4
-/*
- * Shorthand for the NDD parameters
- */
-#define param_autoneg_cap nd_params[PARAM_AUTONEG_CAP].val
-#define param_pause_cap nd_params[PARAM_PAUSE_CAP].val
-#define param_asym_pause_cap nd_params[PARAM_ASYM_PAUSE_CAP].val
-#define param_1000fdx_cap nd_params[PARAM_1000FDX_CAP].val
-#define param_1000hdx_cap nd_params[PARAM_1000HDX_CAP].val
-#define param_100t4_cap nd_params[PARAM_100T4_CAP].val
-#define param_100fdx_cap nd_params[PARAM_100FDX_CAP].val
-#define param_100hdx_cap nd_params[PARAM_100HDX_CAP].val
-#define param_10fdx_cap nd_params[PARAM_10FDX_CAP].val
-#define param_10hdx_cap nd_params[PARAM_10HDX_CAP].val
-#define param_rem_fault nd_params[PARAM_REM_FAULT].val
-
-#define param_adv_autoneg_cap nd_params[PARAM_ADV_AUTONEG_CAP].val
-#define param_adv_pause_cap nd_params[PARAM_ADV_PAUSE_CAP].val
-#define param_adv_asym_pause_cap nd_params[PARAM_ADV_ASYM_PAUSE_CAP].val
-#define param_adv_1000fdx_cap nd_params[PARAM_ADV_1000FDX_CAP].val
-#define param_adv_1000hdx_cap nd_params[PARAM_ADV_1000HDX_CAP].val
-#define param_adv_100t4_cap nd_params[PARAM_ADV_100T4_CAP].val
-#define param_adv_100fdx_cap nd_params[PARAM_ADV_100FDX_CAP].val
-#define param_adv_100hdx_cap nd_params[PARAM_ADV_100HDX_CAP].val
-#define param_adv_10fdx_cap nd_params[PARAM_ADV_10FDX_CAP].val
-#define param_adv_10hdx_cap nd_params[PARAM_ADV_10HDX_CAP].val
-#define param_adv_rem_fault nd_params[PARAM_ADV_REM_FAULT].val
-
-#define param_lp_autoneg_cap nd_params[PARAM_LP_AUTONEG_CAP].val
-#define param_lp_pause_cap nd_params[PARAM_LP_PAUSE_CAP].val
-#define param_lp_asym_pause_cap nd_params[PARAM_LP_ASYM_PAUSE_CAP].val
-#define param_lp_1000fdx_cap nd_params[PARAM_LP_1000FDX_CAP].val
-#define param_lp_1000hdx_cap nd_params[PARAM_LP_1000HDX_CAP].val
-#define param_lp_100t4_cap nd_params[PARAM_LP_100T4_CAP].val
-#define param_lp_100fdx_cap nd_params[PARAM_LP_100FDX_CAP].val
-#define param_lp_100hdx_cap nd_params[PARAM_LP_100HDX_CAP].val
-#define param_lp_10fdx_cap nd_params[PARAM_LP_10FDX_CAP].val
-#define param_lp_10hdx_cap nd_params[PARAM_LP_10HDX_CAP].val
-#define param_lp_rem_fault nd_params[PARAM_LP_REM_FAULT].val
-
enum ioc_reply {
IOC_INVAL = -1, /* bad, NAK with EINVAL */
IOC_DONE, /* OK, reply sent */
@@ -370,73 +331,6 @@ typedef struct adapter_info {
uint32_t rxdctl_mask; /* mask for RXDCTL register */
} adapter_info_t;
-/*
- * Named Data (ND) Parameter Management Structure
- */
-typedef struct {
- struct igb *private;
- uint32_t info;
- uint32_t min;
- uint32_t max;
- uint32_t val;
- char *name;
-} nd_param_t;
-
-/*
- * NDD parameter indexes, divided into:
- *
- * read-only parameters describing the hardware's capabilities
- * read-write parameters controlling the advertised capabilities
- * read-only parameters describing the partner's capabilities
- * read-write parameters controlling the force speed and duplex
- * read-only parameters describing the link state
- * read-only parameters describing the driver properties
- * read-write parameters controlling the driver properties
- */
-enum {
- PARAM_AUTONEG_CAP,
- PARAM_PAUSE_CAP,
- PARAM_ASYM_PAUSE_CAP,
- PARAM_1000FDX_CAP,
- PARAM_1000HDX_CAP,
- PARAM_100T4_CAP,
- PARAM_100FDX_CAP,
- PARAM_100HDX_CAP,
- PARAM_10FDX_CAP,
- PARAM_10HDX_CAP,
- PARAM_REM_FAULT,
-
- PARAM_ADV_AUTONEG_CAP,
- PARAM_ADV_PAUSE_CAP,
- PARAM_ADV_ASYM_PAUSE_CAP,
- PARAM_ADV_1000FDX_CAP,
- PARAM_ADV_1000HDX_CAP,
- PARAM_ADV_100T4_CAP,
- PARAM_ADV_100FDX_CAP,
- PARAM_ADV_100HDX_CAP,
- PARAM_ADV_10FDX_CAP,
- PARAM_ADV_10HDX_CAP,
- PARAM_ADV_REM_FAULT,
-
- PARAM_LP_AUTONEG_CAP,
- PARAM_LP_PAUSE_CAP,
- PARAM_LP_ASYM_PAUSE_CAP,
- PARAM_LP_1000FDX_CAP,
- PARAM_LP_1000HDX_CAP,
- PARAM_LP_100T4_CAP,
- PARAM_LP_100FDX_CAP,
- PARAM_LP_100HDX_CAP,
- PARAM_LP_10FDX_CAP,
- PARAM_LP_10HDX_CAP,
- PARAM_LP_REM_FAULT,
-
- PARAM_LINK_STATUS,
- PARAM_LINK_SPEED,
- PARAM_LINK_DUPLEX,
-
- PARAM_COUNT
-};
-
typedef union igb_ether_addr {
struct {
uint32_t high;
@@ -455,11 +349,6 @@ typedef enum {
USE_DMA
} tx_type_t;
-typedef enum {
- RCB_FREE,
- RCB_SENDUP
-} rcb_state_t;
-
typedef struct tx_context {
uint32_t hcksum_flags;
uint32_t ip_hdr_len;
@@ -505,10 +394,10 @@ typedef struct tx_control_block {
*/
typedef struct rx_control_block {
mblk_t *mp;
- rcb_state_t state;
+ uint32_t ref_cnt;
dma_buffer_t rx_buf;
frtn_t free_rtn;
- struct igb_rx_ring *rx_ring;
+ struct igb_rx_data *rx_data;
} rx_control_block_t;
/*
@@ -558,10 +447,6 @@ typedef struct igb_tx_ring {
*/
uint32_t ring_size; /* Tx descriptor ring size */
uint32_t free_list_size; /* Tx free list size */
- uint32_t copy_thresh;
- uint32_t recycle_thresh;
- uint32_t overload_thresh;
- uint32_t resched_thresh;
boolean_t reschedule;
uint32_t recycle_fail;
@@ -589,14 +474,7 @@ typedef struct igb_tx_ring {
/*
* Software Receive Ring
*/
-typedef struct igb_rx_ring {
- uint32_t index; /* Ring index */
- uint32_t intr_vector; /* Interrupt vector index */
-
- /*
- * Mutexes
- */
- kmutex_t rx_lock; /* Rx access lock */
+typedef struct igb_rx_data {
kmutex_t recycle_lock; /* Recycle lock, for rcb_tail */
/*
@@ -617,12 +495,27 @@ typedef struct igb_rx_ring {
uint32_t rcb_free; /* Number of free rcbs */
/*
- * Rx ring settings and status
+ * Rx sw ring settings and status
*/
uint32_t ring_size; /* Rx descriptor ring size */
uint32_t free_list_size; /* Rx free list size */
- uint32_t limit_per_intr; /* Max packets per interrupt */
- uint32_t copy_thresh;
+
+ uint32_t rcb_pending;
+ uint32_t flag;
+
+ struct igb_rx_ring *rx_ring; /* Pointer to rx ring */
+} igb_rx_data_t;
+
+/*
+ * Software Data Structure for Rx Ring
+ */
+typedef struct igb_rx_ring {
+ uint32_t index; /* Ring index */
+ uint32_t intr_vector; /* Interrupt vector index */
+
+ igb_rx_data_t *rx_data; /* Rx software ring */
+
+ kmutex_t rx_lock; /* Rx access lock */
#ifdef IGB_DEBUG
/*
@@ -669,9 +562,12 @@ typedef struct igb {
uint32_t reset_count;
uint32_t attach_progress;
uint32_t loopback_mode;
+ uint32_t default_mtu;
uint32_t max_frame_size;
uint32_t dout_sync;
+ uint32_t rcb_pending;
+
uint32_t mr_enable; /* Enable multiple rings */
uint32_t vmdq_mode; /* Mode of VMDq */
@@ -693,6 +589,7 @@ typedef struct igb {
uint32_t tx_ring_size; /* Tx descriptor ring size */
uint32_t tx_buf_size; /* Tx buffer size */
+ boolean_t tx_ring_init;
boolean_t tx_head_wb_enable; /* Tx head wrtie-back */
boolean_t tx_hcksum_enable; /* Tx h/w cksum offload */
boolean_t lso_enable; /* Large Segment Offload */
@@ -719,6 +616,7 @@ typedef struct igb {
kmutex_t gen_lock; /* General lock for device access */
kmutex_t watchdog_lock;
kmutex_t link_lock;
+ kmutex_t rx_pending_lock;
boolean_t watchdog_enable;
boolean_t watchdog_start;
@@ -738,11 +636,48 @@ typedef struct igb {
*/
kstat_t *igb_ks;
- /*
- * NDD definitions
- */
- caddr_t nd_data;
- nd_param_t nd_params[PARAM_COUNT];
+ uint32_t param_en_1000fdx_cap:1,
+ param_en_1000hdx_cap:1,
+ param_en_100t4_cap:1,
+ param_en_100fdx_cap:1,
+ param_en_100hdx_cap:1,
+ param_en_10fdx_cap:1,
+ param_en_10hdx_cap:1,
+ param_1000fdx_cap:1,
+ param_1000hdx_cap:1,
+ param_100t4_cap:1,
+ param_100fdx_cap:1,
+ param_100hdx_cap:1,
+ param_10fdx_cap:1,
+ param_10hdx_cap:1,
+ param_autoneg_cap:1,
+ param_pause_cap:1,
+ param_asym_pause_cap:1,
+ param_rem_fault:1,
+ param_adv_1000fdx_cap:1,
+ param_adv_1000hdx_cap:1,
+ param_adv_100t4_cap:1,
+ param_adv_100fdx_cap:1,
+ param_adv_100hdx_cap:1,
+ param_adv_10fdx_cap:1,
+ param_adv_10hdx_cap:1,
+ param_adv_autoneg_cap:1,
+ param_adv_pause_cap:1,
+ param_adv_asym_pause_cap:1,
+ param_adv_rem_fault:1,
+ param_lp_1000fdx_cap:1,
+ param_lp_1000hdx_cap:1,
+ param_lp_100t4_cap:1;
+
+ uint32_t param_lp_100fdx_cap:1,
+ param_lp_100hdx_cap:1,
+ param_lp_10fdx_cap:1,
+ param_lp_10hdx_cap:1,
+ param_lp_autoneg_cap:1,
+ param_lp_pause_cap:1,
+ param_lp_asym_pause_cap:1,
+ param_lp_rem_fault:1,
+ param_pad_to_32:24;
/*
* FMA capabilities
@@ -838,12 +773,15 @@ void e1000_rar_set_vmdq(struct e1000_hw *, const uint8_t *, uint32_t,
*/
int igb_alloc_dma(igb_t *);
void igb_free_dma(igb_t *);
+void igb_free_dma_buffer(dma_buffer_t *);
+int igb_alloc_rx_ring_data(igb_rx_ring_t *rx_ring);
+void igb_free_rx_ring_data(igb_rx_data_t *rx_data);
/*
* Function prototypes in igb_main.c
*/
-int igb_start(igb_t *);
-void igb_stop(igb_t *);
+int igb_start(igb_t *, boolean_t);
+void igb_stop(igb_t *, boolean_t);
int igb_setup_link(igb_t *, boolean_t);
int igb_unicst_find(igb_t *, const uint8_t *);
int igb_unicst_set(igb_t *, const uint8_t *, int);
@@ -872,10 +810,18 @@ void igb_m_ioctl(void *, queue_t *, mblk_t *);
boolean_t igb_m_getcapab(void *, mac_capab_t, void *);
void igb_fill_ring(void *, mac_ring_type_t, const int, const int,
mac_ring_info_t *, mac_ring_handle_t);
+int igb_m_setprop(void *, const char *, mac_prop_id_t, uint_t, const void *);
+int igb_m_getprop(void *, const char *, mac_prop_id_t,
+ uint_t, uint_t, void *, uint_t *);
+int igb_set_priv_prop(igb_t *, const char *, uint_t, const void *);
+int igb_get_priv_prop(igb_t *, const char *,
+ uint_t, uint_t, void *, uint_t *);
+boolean_t igb_param_locked(mac_prop_id_t);
void igb_fill_group(void *arg, mac_ring_type_t, const int,
mac_group_info_t *, mac_group_handle_t);
int igb_rx_ring_intr_enable(mac_intr_handle_t);
int igb_rx_ring_intr_disable(mac_intr_handle_t);
+int igb_get_def_val(igb_t *, mac_prop_id_t, uint_t, void *);
/*
* Function prototypes in igb_rx.c
@@ -899,13 +845,6 @@ void igb_log(void *, const char *, ...);
void igb_error(void *, const char *, ...);
/*
- * Function prototypes in igb_ndd.c
- */
-int igb_nd_init(igb_t *);
-void igb_nd_cleanup(igb_t *);
-enum ioc_reply igb_nd_ioctl(igb_t *, queue_t *, mblk_t *, struct iocblk *);
-
-/*
* Function prototypes in igb_stat.c
*/
int igb_init_stats(igb_t *);
diff --git a/usr/src/uts/common/io/igb/igb_tx.c b/usr/src/uts/common/io/igb/igb_tx.c
index 54bde3d330..9deef24989 100644
--- a/usr/src/uts/common/io/igb/igb_tx.c
+++ b/usr/src/uts/common/io/igb/igb_tx.c
@@ -22,7 +22,7 @@
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -109,13 +109,14 @@ igb_tx(igb_tx_ring_t *tx_ring, mblk_t *mp)
tx_control_block_t *tcb;
tx_context_t tx_context, *ctx;
link_list_t pending_list;
- mblk_t *new_mp;
- mblk_t *previous_mp;
+ mblk_t *hdr_new_mp = NULL;
+ mblk_t *hdr_previous_mp = NULL;
+ mblk_t *hdr_current_mp = NULL;
uint32_t hdr_frag_len;
uint32_t hdr_len, len;
uint32_t copy_thresh;
- copy_thresh = tx_ring->copy_thresh;
+ copy_thresh = igb->tx_copy_thresh;
/* Get the mblk size */
mbsize = 0;
@@ -156,15 +157,15 @@ igb_tx(igb_tx_ring_t *tx_ring, mblk_t *mp)
* Check and recycle tx descriptors.
* The recycle threshold here should be selected carefully
*/
- if (tx_ring->tbd_free < tx_ring->recycle_thresh)
+ if (tx_ring->tbd_free < igb->tx_recycle_thresh)
tx_ring->tx_recycle(tx_ring);
/*
* After the recycling, if the tbd_free is less than the
- * overload_threshold, assert overload, return B_FALSE;
+	 * tx_overload_thresh, assert overload, return B_FALSE;
* and we need to re-schedule the tx again.
*/
- if (tx_ring->tbd_free < tx_ring->overload_thresh) {
+ if (tx_ring->tbd_free < igb->tx_overload_thresh) {
tx_ring->reschedule = B_TRUE;
IGB_DEBUG_STAT(tx_ring->stat_overload);
return (B_FALSE);
@@ -177,77 +178,64 @@ igb_tx(igb_tx_ring_t *tx_ring, mblk_t *mp)
* the headers(MAC+IP+TCP) is physical memory non-contiguous.
*/
if (ctx && ctx->lso_flag) {
- hdr_len = ctx->mac_hdr_len + ctx->ip_hdr_len +
- ctx->l4_hdr_len;
+ hdr_len = ctx->mac_hdr_len + ctx->ip_hdr_len + ctx->l4_hdr_len;
len = MBLKL(mp);
- current_mp = mp;
- previous_mp = NULL;
+ hdr_current_mp = mp;
while (len < hdr_len) {
- previous_mp = current_mp;
- current_mp = current_mp->b_cont;
- len += MBLKL(current_mp);
+ hdr_previous_mp = hdr_current_mp;
+ hdr_current_mp = hdr_current_mp->b_cont;
+ len += MBLKL(hdr_current_mp);
}
-
/*
- * If len is larger than copy_thresh, we do not
- * need to do anything since igb's tx copy mechanism
- * will ensure that the headers will be handled
- * in one descriptor.
+ * If the header and the payload are in different mblks,
+	 * we simply force the header to be copied into a pre-allocated
+	 * page-aligned buffer.
*/
- if (len > copy_thresh) {
- if (len != hdr_len) {
- /*
- * If the header and the payload are in
- * different mblks, we simply force the
- * header to be copied into a
- * new-allocated buffer.
- */
- hdr_frag_len = hdr_len -
- (len - MBLKL(current_mp));
+ if (len == hdr_len)
+ goto adjust_threshold;
- /*
- * There are two cases we will reallocate
- * a mblk for the last header fragment.
- * 1. the header is in multiple mblks and
- * the last fragment shares the same mblk
- * with the payload
- * 2. the header is in a single mblk shared
- * with the payload but the header crosses
- * a page.
- */
- if ((current_mp != mp) ||
- (P2NPHASE((uintptr_t)current_mp->b_rptr,
- igb->page_size) < hdr_len)) {
- /*
- * reallocate the mblk for the last
- * header fragment, expect it to be
- * copied into pre-allocated
- * page-aligned buffer
- */
- new_mp = allocb(hdr_frag_len, NULL);
- if (!new_mp) {
- return (B_FALSE);
- }
-
- /*
- * Insert the new mblk
- */
- bcopy(current_mp->b_rptr,
- new_mp->b_rptr, hdr_frag_len);
- new_mp->b_wptr = new_mp->b_rptr +
- hdr_frag_len;
- new_mp->b_cont = current_mp;
- if (previous_mp)
- previous_mp->b_cont = new_mp;
- else
- mp = new_mp;
- current_mp->b_rptr += hdr_frag_len;
- }
+ hdr_frag_len = hdr_len - (len - MBLKL(hdr_current_mp));
+ /*
+ * There are two cases we will reallocate
+ * a mblk for the last header fragment.
+ * 1. the header is in multiple mblks and
+ * the last fragment shares the same mblk
+ * with the payload
+ * 2. the header is in a single mblk shared
+ * with the payload but the header crosses
+ * a page.
+ */
+ if ((hdr_current_mp != mp) ||
+ (P2NPHASE((uintptr_t)hdr_current_mp->b_rptr, igb->page_size)
+ < hdr_len)) {
+ /*
+ * reallocate the mblk for the last header fragment,
+ * expect it to be copied into pre-allocated
+ * page-aligned buffer
+ */
+ hdr_new_mp = allocb(hdr_frag_len, NULL);
+ if (!hdr_new_mp) {
+ return (B_FALSE);
}
- if (copy_thresh < hdr_len)
- copy_thresh = hdr_len;
+ /* link the new header fragment with the other parts */
+ bcopy(hdr_current_mp->b_rptr,
+ hdr_new_mp->b_rptr, hdr_frag_len);
+ hdr_new_mp->b_wptr = hdr_new_mp->b_rptr + hdr_frag_len;
+ hdr_new_mp->b_cont = hdr_current_mp;
+ if (hdr_previous_mp)
+ hdr_previous_mp->b_cont = hdr_new_mp;
+ else
+ mp = hdr_new_mp;
+ hdr_current_mp->b_rptr += hdr_frag_len;
}
+adjust_threshold:
+ /*
+	 * adjust the bcopy threshold to guarantee
+	 * that the header is copied using bcopy
+ */
+ if (copy_thresh < hdr_len)
+ copy_thresh = hdr_len;
}
/*
@@ -436,6 +424,21 @@ igb_tx(igb_tx_ring_t *tx_ring, mblk_t *mp)
tx_failure:
/*
+	 * If a new mblk has been allocated for the last header
+	 * fragment of an LSO packet, we should restore the
+ * modified mp.
+ */
+ if (hdr_new_mp) {
+ hdr_new_mp->b_cont = NULL;
+ freeb(hdr_new_mp);
+ hdr_current_mp->b_rptr -= hdr_frag_len;
+ if (hdr_previous_mp)
+ hdr_previous_mp->b_cont = hdr_current_mp;
+ else
+ mp = hdr_current_mp;
+ }
+
+ /*
* Discard the mblk and free the used resources
*/
tcb = (tx_control_block_t *)LIST_GET_HEAD(&pending_list);
diff --git a/usr/src/uts/intel/igb/Makefile b/usr/src/uts/intel/igb/Makefile
index 72ff7af3c7..461f653159 100644
--- a/usr/src/uts/intel/igb/Makefile
+++ b/usr/src/uts/intel/igb/Makefile
@@ -19,11 +19,9 @@
# CDDL HEADER END
#
#
-# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Copyright 2010 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
-# ident "%Z%%M% %I% %E% SMI"
-#
# uts/intel/igb/Makefile
#
# This makefile drives the production of the igb
@@ -59,9 +57,9 @@ LINT_TARGET = $(MODULE).lint
INSTALL_TARGET = $(BINARY) $(ROOTMODULE) $(ROOT_CONFFILE)
#
-# Driver depends on MAC & IP
+# Driver depends on MAC
#
-LDFLAGS += -dy -N misc/mac -N drv/ip
+LDFLAGS += -dy -N misc/mac
#
# Default build targets.
diff --git a/usr/src/uts/sparc/igb/Makefile b/usr/src/uts/sparc/igb/Makefile
index 53ab2ec408..3a721ac204 100644
--- a/usr/src/uts/sparc/igb/Makefile
+++ b/usr/src/uts/sparc/igb/Makefile
@@ -19,11 +19,9 @@
# CDDL HEADER END
#
#
-# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Copyright 2010 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
-# ident "%Z%%M% %I% %E% SMI"
-#
# uts/sparc/igb/Makefile
#
# This makefile drives the production of the igb
@@ -74,9 +72,9 @@ CFLAGS += $(CCVERBOSE)
CFLAGS += -dalign
#
-# Driver depends on MAC & IP
+# Driver depends on MAC
#
-LDFLAGS += -dy -N misc/mac -N drv/ip
+LDFLAGS += -dy -N misc/mac
#
# Default build targets.