| author | Jerry Jelinek <jerry.jelinek@joyent.com> | 2015-03-12 19:05:19 +0000 |
|---|---|---|
| committer | Jerry Jelinek <jerry.jelinek@joyent.com> | 2015-03-12 19:05:19 +0000 |
| commit | 4e07db359c9c712d10537e49bc9ed64fbfd5379e | |
| tree | 2318917051212e7ef38c27f546f4f84f0c57f3f6 | |
| parent | 4761371227e50ad640ef47fc3ca0e6f8cad78682 | |
| parent | c124a83e09115de88ecccd4f689983f42a1d53bd | |
[illumos-gate merge]
commit c124a83e09115de88ecccd4f689983f42a1d53bd
5689 Want support for newer Intel I218 parts
5690 Want support for newer Intel I210 parts
commit 087a28d18c24cf4938e8a2617b5127a2fd29ddf4
3446 Update bge to support missing 57xx/577xx devices
commit fc01d378bd20b13dcb27185d874fc628d851b161
5106 elfdump compiles using headers from the build system (fix noise)
49 files changed, 4412 insertions, 1555 deletions
diff --git a/exception_lists/cstyle b/exception_lists/cstyle
index 570b908c1d..aadf35d05e 100644
--- a/exception_lists/cstyle
+++ b/exception_lists/cstyle
@@ -881,6 +881,13 @@ usr/src/uts/common/io/bnxe/bnxe_tx.c
 usr/src/uts/common/io/bnxe/bnxe_workq.c
 usr/src/uts/common/io/bnxe/bnxe.h
 usr/src/uts/common/io/bnxe/version.h
+usr/src/uts/common/io/bge/bge_main2.c
+usr/src/uts/common/io/bge/bge_chip2.c
+usr/src/uts/common/io/bge/bge_mii.c
+usr/src/uts/common/io/bge/bge_kstats.c
+usr/src/uts/common/io/bge/bge_impl.h
+usr/src/uts/common/io/bge/bge_send.c
+usr/src/uts/common/io/bge/bge_hw.h
 usr/src/uts/common/io/e1000api/e1000_80003es2lan.c
 usr/src/uts/common/io/e1000api/e1000_80003es2lan.h
 usr/src/uts/common/io/e1000api/e1000_82540.c
diff --git a/exception_lists/hdrchk b/exception_lists/hdrchk
index 0d9457d622..60c60a527e 100644
--- a/exception_lists/hdrchk
+++ b/exception_lists/hdrchk
@@ -287,6 +287,15 @@ usr/src/uts/common/io/bnxe/bnxe_binding.h
 usr/src/uts/common/io/bnxe/bnxe_debug.h
 usr/src/uts/common/io/bnxe/bnxe.h
 usr/src/uts/common/io/bnxe/version.h
+usr/src/uts/common/io/e1000api/e1000_mac.h
+usr/src/uts/common/io/e1000api/e1000_82543.h
+usr/src/uts/common/io/e1000api/e1000_regs.h
+usr/src/uts/common/io/e1000api/e1000_phy.h
+usr/src/uts/common/io/e1000api/e1000_nvm.h
+usr/src/uts/common/io/e1000api/e1000_82571.h
+usr/src/uts/common/io/e1000api/e1000_80003es2lan.h
+usr/src/uts/common/io/e1000api/e1000_manage.h
+usr/src/uts/common/io/e1000api/e1000_i210.h
 usr/src/uts/common/io/ixgbe/ixgbe_common.h
 usr/src/uts/common/io/nfp/nfp.h
 usr/src/uts/common/io/nfp/nfpci.h
diff --git a/usr/src/cmd/sgs/elfdump/Makefile.targ b/usr/src/cmd/sgs/elfdump/Makefile.targ
index c304eccf79..971d9dfeb4 100644
--- a/usr/src/cmd/sgs/elfdump/Makefile.targ
+++ b/usr/src/cmd/sgs/elfdump/Makefile.targ
@@ -54,10 +54,10 @@ check_struct_layout: gen_struct_layout gen_layout_obj.o
 		> struct_layout_$(ARCH).tmp
 	./gen_struct_layout gen_layout_obj.o $(ARCH) \
 		>> struct_layout_$(ARCH).tmp
-	diff -u struct_layout_$(ARCH).tmp \
+	@diff -u struct_layout_$(ARCH).tmp \
 	    ../common/struct_layout_$(ARCH).c ||\
 	    { echo "Error: struct_layout_$(ARCH).c needs update!" ; exit 1; }
-	touch check_struct_layout
+	touch $@
 
 # We need CTF data in this object.
 gen_layout_obj.o := CFLAGS += $(CTF_FLAGS)
diff --git a/usr/src/pkg/manifests/driver-network-bge.mf b/usr/src/pkg/manifests/driver-network-bge.mf
index 74b2555119..0b0cbb6898 100644
--- a/usr/src/pkg/manifests/driver-network-bge.mf
+++ b/usr/src/pkg/manifests/driver-network-bge.mf
@@ -77,11 +77,15 @@ $(i386_ONLY)driver name=bge clone_perms="bge 0666 root sys" \
     alias=pci14e4,16a7 \
     alias=pci14e4,16a8 \
     alias=pci14e4,16c7 \
+    alias=pciex14e4,1643 \
     alias=pciex14e4,1655 \
     alias=pciex14e4,1656 \
+    alias=pciex14e4,1657 \
     alias=pciex14e4,165a \
     alias=pciex14e4,165b \
     alias=pciex14e4,165c \
+    alias=pciex14e4,165f \
+    alias=pciex14e4,1665 \
     alias=pciex14e4,1673 \
     alias=pciex14e4,1674 \
     alias=pciex14e4,1677 \
@@ -92,6 +96,7 @@ $(i386_ONLY)driver name=bge clone_perms="bge 0666 root sys" \
     alias=pciex14e4,1684 \
     alias=pciex14e4,1692 \
     alias=pciex14e4,169d \
+    alias=pciex14e4,16f3 \
     alias=pciex14e4,16fd \
     alias=pciex14e4,1713
 $(sparc_ONLY)driver name=bge clone_perms="bge 0666 root sys" \
diff --git a/usr/src/pkg/manifests/driver-network-e1000g.mf b/usr/src/pkg/manifests/driver-network-e1000g.mf
index 1690b9af07..1aef7965a4 100644
--- a/usr/src/pkg/manifests/driver-network-e1000g.mf
+++ b/usr/src/pkg/manifests/driver-network-e1000g.mf
@@ -138,6 +138,10 @@ driver name=e1000g clone_perms="e1000g 0666 root sys" perms="* 0666 root sys" \
     alias=pci8086,153b \
     alias=pci8086,1559 \
     alias=pci8086,155a \
+    alias=pci8086,15a0 \
+    alias=pci8086,15a1 \
+    alias=pci8086,15a2 \
+    alias=pci8086,15a3 \
     alias=pci8086,294c \
     alias=pci8086,f0fe \
     alias=pciex8086,1049 \
@@ -193,6 +197,10 @@ driver name=e1000g clone_perms="e1000g 0666 root sys" perms="* 0666 root sys" \
     alias=pciex8086,153b \
     alias=pciex8086,1559 \
     alias=pciex8086,155a \
+    alias=pciex8086,15a0 \
+    alias=pciex8086,15a1 \
+    alias=pciex8086,15a2 \
+    alias=pciex8086,15a3 \
     alias=pciex8086,294c \
     alias=pciex8086,f0fe
 file path=kernel/drv/$(ARCH64)/e1000g group=sys
diff --git a/usr/src/pkg/manifests/driver-network-igb.mf b/usr/src/pkg/manifests/driver-network-igb.mf
index 8aad9b05f4..669ec3b24c 100644
--- a/usr/src/pkg/manifests/driver-network-igb.mf
+++ b/usr/src/pkg/manifests/driver-network-igb.mf
@@ -71,6 +71,8 @@ driver name=igb clone_perms="igb 0666 root sys" perms="* 0666 root sys" \
     alias=pciex8086,1538 \
     alias=pciex8086,1539 \
     alias=pciex8086,1546 \
+    alias=pciex8086,157b \
+    alias=pciex8086,157c \
     alias=pciex8086,1f40 \
     alias=pciex8086,1f41 \
     alias=pciex8086,1f45 \
diff --git a/usr/src/uts/common/io/bge/bge_chip2.c b/usr/src/uts/common/io/bge/bge_chip2.c
index f687ce4892..7790c2df6e 100644
--- a/usr/src/uts/common/io/bge/bge_chip2.c
+++ b/usr/src/uts/common/io/bge/bge_chip2.c
@@ -20,16 +20,19 @@
  */
 
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010-2013, by Broadcom, Inc.
+ * All Rights Reserved.
  */
 
 /*
- * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates.
+ * All rights reserved.
  */
 
 #include "bge_impl.h"
 
 #define	PIO_ADDR(bgep, offset)	((void *)((caddr_t)(bgep)->io_regs+(offset)))
+#define	APE_ADDR(bgep, offset)	((void *)((caddr_t)(bgep)->ape_regs+(offset)))
 
 /*
  * Future features ... ?
@@ -54,11 +57,6 @@ boolean_t bge_enable_msi = B_TRUE; boolean_t bge_relaxed_ordering = B_TRUE; /* - * Property names - */ -static char knownids_propname[] = "bge-known-subsystems"; - -/* * Patchable globals: * * bge_autorecover @@ -98,7 +96,7 @@ static uint32_t bge_dma_rwctrl_5714 = PDRWCR_VAR_5714; static uint32_t bge_dma_rwctrl_5715 = PDRWCR_VAR_5715; uint32_t bge_rx_ticks_norm = 128; -uint32_t bge_tx_ticks_norm = 2048; /* 8 for FJ2+ !?!? */ +uint32_t bge_tx_ticks_norm = 512; uint32_t bge_rx_count_norm = 8; uint32_t bge_tx_count_norm = 128; @@ -243,8 +241,10 @@ bge_ind_get32(bge_t *bgep, bge_regno_t regno) BGE_TRACE(("bge_ind_get32($%p, 0x%lx)", (void *)bgep, regno)); #ifdef __sparc - if (DEVICE_5717_SERIES_CHIPSETS(bgep)) + if (DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) { regno = LE_32(regno); + } #endif pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_RIAAR, regno); val = pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_RIADR); @@ -268,8 +268,10 @@ bge_ind_put32(bge_t *bgep, bge_regno_t regno, uint32_t val) val = LE_32(val); #ifdef __sparc - if (DEVICE_5717_SERIES_CHIPSETS(bgep)) + if (DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) { regno = LE_32(regno); + } #endif pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_RIAAR, regno); pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_RIADR, val); @@ -327,6 +329,8 @@ bge_chip_cfg_init(bge_t *bgep, chip_id_t *cidp, boolean_t enable_dma) ddi_acc_handle_t handle; uint16_t command; uint32_t mhcr; + uint32_t prodid; + uint32_t pci_state; uint16_t value16; int i; @@ -360,10 +364,23 @@ bge_chip_cfg_init(bge_t *bgep, chip_id_t *cidp, boolean_t enable_dma) * byte-swapped value to it. So we just write zero first for simplicity. */ cidp->device = pci_config_get16(handle, PCI_CONF_DEVID); - if (DEVICE_5717_SERIES_CHIPSETS(bgep)) + if (DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) { pci_config_put32(handle, PCI_CONF_BGE_MHCR, 0); + } + mhcr = pci_config_get32(handle, PCI_CONF_BGE_MHCR); - cidp->asic_rev = mhcr & MHCR_CHIP_REV_MASK; + cidp->asic_rev = (mhcr & MHCR_CHIP_REV_MASK); + cidp->asic_rev_prod_id = 0; + if ((cidp->asic_rev & 0xf0000000) == CHIP_ASIC_REV_USE_PROD_ID_REG) { + prodid = CHIP_ASIC_REV_PROD_ID_REG; + if (DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) { + prodid = CHIP_ASIC_REV_PROD_ID_GEN2_REG; + } + cidp->asic_rev_prod_id = pci_config_get32(handle, prodid); + } + cidp->businfo = pci_config_get32(handle, PCI_CONF_BGE_PCISTATE); cidp->command = pci_config_get16(handle, PCI_CONF_COMM); @@ -374,6 +391,12 @@ bge_chip_cfg_init(bge_t *bgep, chip_id_t *cidp, boolean_t enable_dma) cidp->clsize = pci_config_get8(handle, PCI_CONF_CACHE_LINESZ); cidp->latency = pci_config_get8(handle, PCI_CONF_LATENCY_TIMER); + /* 5717 C0 is treated just like 5720 A0 */ + if (pci_config_get16(bgep->cfg_handle, PCI_CONF_DEVID) == + DEVICE_ID_5717_C0) { + cidp->device = DEVICE_ID_5720; + } + BGE_DEBUG(("bge_chip_cfg_init: %s bus is %s and %s; #INTA is %s", cidp->businfo & PCISTATE_BUS_IS_PCI ? "PCI" : "PCI-X", cidp->businfo & PCISTATE_BUS_IS_FAST ? "fast" : "slow", @@ -445,25 +468,31 @@ bge_chip_cfg_init(bge_t *bgep, chip_id_t *cidp, boolean_t enable_dma) * see whether the host is truly up to date, and regenerate * its interrupt if not. 
*/ - mhcr = MHCR_ENABLE_INDIRECT_ACCESS | - MHCR_ENABLE_TAGGED_STATUS_MODE | - MHCR_MASK_INTERRUPT_MODE | - MHCR_CLEAR_INTERRUPT_INTA; - + mhcr = MHCR_ENABLE_INDIRECT_ACCESS | + MHCR_ENABLE_PCI_STATE_RW | + MHCR_ENABLE_TAGGED_STATUS_MODE | + MHCR_MASK_INTERRUPT_MODE | + MHCR_CLEAR_INTERRUPT_INTA; if (bgep->intr_type == DDI_INTR_TYPE_FIXED) mhcr |= MHCR_MASK_PCI_INT_OUTPUT; #ifdef _BIG_ENDIAN mhcr |= MHCR_ENABLE_ENDIAN_WORD_SWAP | MHCR_ENABLE_ENDIAN_BYTE_SWAP; #endif /* _BIG_ENDIAN */ - - if (DEVICE_5717_SERIES_CHIPSETS(bgep)) - pci_config_put32(handle, PCI_CONF_BGE_MHCR, 0); pci_config_put32(handle, PCI_CONF_BGE_MHCR, mhcr); #ifdef BGE_IPMI_ASF bgep->asf_wordswapped = B_FALSE; #endif + + pci_state = (PCISTATE_EXT_ROM_ENABLE | PCISTATE_EXT_ROM_RETRY); + /* allow reads and writes to the APE register and memory space */ + if (bgep->ape_enabled) { + pci_state |= PCISTATE_ALLOW_APE_CTLSPC_WR | + PCISTATE_ALLOW_APE_SHMEM_WR | PCISTATE_ALLOW_APE_PSPACE_WR; + } + pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_PCISTATE, pci_state); + /* * Step 1 (also step 7): Enable PCI Memory Space accesses * Disable Memory Write/Invalidate @@ -533,9 +562,14 @@ bge_chip_cfg_init(bge_t *bgep, chip_id_t *cidp, boolean_t enable_dma) if (DEVICE_5723_SERIES_CHIPSETS(bgep)) { bge_cfg_clr16(bgep, PCI_CONF_DEV_CTRL_5723, DEV_CTRL_NO_SNOOP | DEV_CTRL_RELAXED); - } else + } else if (DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) { + bge_cfg_clr16(bgep, PCI_CONF_DEV_CTRL_5717, + DEV_CTRL_NO_SNOOP | DEV_CTRL_RELAXED); + } else { bge_cfg_clr16(bgep, PCI_CONF_DEV_CTRL, DEV_CTRL_NO_SNOOP | DEV_CTRL_RELAXED); + } } } @@ -629,8 +663,10 @@ bge_reg_get64(bge_t *bgep, bge_regno_t regno) uint64_t regval; #ifdef __amd64 - if (DEVICE_5723_SERIES_CHIPSETS(bgep) || bge_get_em64t_type() || - DEVICE_5717_SERIES_CHIPSETS(bgep)) { + if (DEVICE_5723_SERIES_CHIPSETS(bgep) || + bge_get_em64t_type() || + DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) { regval = ddi_get32(bgep->io_handle, PIO_ADDR(bgep, regno + 4)); regval <<= 32; regval |= ddi_get32(bgep->io_handle, PIO_ADDR(bgep, regno)); @@ -639,7 +675,8 @@ bge_reg_get64(bge_t *bgep, bge_regno_t regno) } #elif defined(__sparc) if (DEVICE_5723_SERIES_CHIPSETS(bgep) || - DEVICE_5717_SERIES_CHIPSETS(bgep)) { + DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) { regval = ddi_get32(bgep->io_handle, PIO_ADDR(bgep, regno)); regval <<= 32; regval |= ddi_get32(bgep->io_handle, PIO_ADDR(bgep, regno + 4)); @@ -674,8 +711,10 @@ bge_reg_put64(bge_t *bgep, bge_regno_t regno, uint64_t data) #endif /* _LITTLE_ENDIAN */ #ifdef __amd64 - if (DEVICE_5723_SERIES_CHIPSETS(bgep) || bge_get_em64t_type() || - DEVICE_5717_SERIES_CHIPSETS(bgep)) { + if (DEVICE_5723_SERIES_CHIPSETS(bgep) || + bge_get_em64t_type() || + DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) { ddi_put32(bgep->io_handle, PIO_ADDR(bgep, regno), (uint32_t)data); BGE_PCICHK(bgep); @@ -687,7 +726,8 @@ bge_reg_put64(bge_t *bgep, bge_regno_t regno, uint64_t data) } #elif defined(__sparc) if (DEVICE_5723_SERIES_CHIPSETS(bgep) || - DEVICE_5717_SERIES_CHIPSETS(bgep)) { + DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) { ddi_put32(bgep->io_handle, PIO_ADDR(bgep, regno + 4), (uint32_t)data); BGE_PCICHK(bgep); @@ -845,8 +885,10 @@ bge_nic_setwin(bge_t *bgep, bge_regno_t base) B_TRUE : B_FALSE); } #ifdef __sparc - if (DEVICE_5717_SERIES_CHIPSETS(bgep)) + if (DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) 
{ base = LE_32(base); + } #endif pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MWBAR, base); } @@ -905,8 +947,10 @@ bge_nic_put32(bge_t *bgep, bge_regno_t addr, uint32_t data) #endif #ifdef __sparc - if (DEVICE_5717_SERIES_CHIPSETS(bgep)) + if (DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) { addr = LE_32(addr); + } pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MWBAR, addr); data = LE_32(data); pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MWDAR, data); @@ -933,8 +977,10 @@ bge_nic_get64(bge_t *bgep, bge_regno_t addr) addr += NIC_MEM_WINDOW_OFFSET; #ifdef __amd64 - if (DEVICE_5723_SERIES_CHIPSETS(bgep) || bge_get_em64t_type() || - DEVICE_5717_SERIES_CHIPSETS(bgep)) { + if (DEVICE_5723_SERIES_CHIPSETS(bgep) || + bge_get_em64t_type() || + DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) { data = ddi_get32(bgep->io_handle, PIO_ADDR(bgep, addr + 4)); data <<= 32; @@ -944,7 +990,8 @@ bge_nic_get64(bge_t *bgep, bge_regno_t addr) } #elif defined(__sparc) if (DEVICE_5723_SERIES_CHIPSETS(bgep) || - DEVICE_5717_SERIES_CHIPSETS(bgep)) { + DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) { data = ddi_get32(bgep->io_handle, PIO_ADDR(bgep, addr)); data <<= 32; data |= ddi_get32(bgep->io_handle, @@ -976,8 +1023,10 @@ bge_nic_put64(bge_t *bgep, bge_regno_t addr, uint64_t data) addr += NIC_MEM_WINDOW_OFFSET; #ifdef __amd64 - if (DEVICE_5723_SERIES_CHIPSETS(bgep) || bge_get_em64t_type() || - DEVICE_5717_SERIES_CHIPSETS(bgep)) { + if (DEVICE_5723_SERIES_CHIPSETS(bgep) || + bge_get_em64t_type() || + DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) { ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr + 4), (uint32_t)data); BGE_PCICHK(bgep); @@ -988,7 +1037,8 @@ bge_nic_put64(bge_t *bgep, bge_regno_t addr, uint64_t data) } #elif defined(__sparc) if (DEVICE_5723_SERIES_CHIPSETS(bgep) || - DEVICE_5717_SERIES_CHIPSETS(bgep)) { + DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) { ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr + 4), (uint32_t)data); BGE_PCICHK(bgep); @@ -1028,8 +1078,10 @@ bge_nic_putrcb(bge_t *bgep, bge_regno_t addr, bge_rcb_t *rcbp) p = (void *)rcbp; #ifdef __amd64 - if (DEVICE_5723_SERIES_CHIPSETS(bgep) || bge_get_em64t_type() || - DEVICE_5717_SERIES_CHIPSETS(bgep)) { + if (DEVICE_5723_SERIES_CHIPSETS(bgep) || + bge_get_em64t_type() || + DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) { ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr), (uint32_t)(*p)); ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr + 4), @@ -1045,7 +1097,8 @@ bge_nic_putrcb(bge_t *bgep, bge_regno_t addr, bge_rcb_t *rcbp) } #elif defined(__sparc) if (DEVICE_5723_SERIES_CHIPSETS(bgep) || - DEVICE_5717_SERIES_CHIPSETS(bgep)) { + DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) { ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr + 4), (uint32_t)(*p)); ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr), @@ -1246,6 +1299,32 @@ bge_mii_put16(bge_t *bgep, bge_regno_t regno, uint16_t data) (void) bge_mii_access(bgep, regno, data, MI_COMMS_COMMAND_WRITE); } +uint16_t +bge_phydsp_read(bge_t *bgep, bge_regno_t regno) +{ + BGE_TRACE(("bge_phydsp_read($%p, 0x%lx)", + (void *)bgep, regno)); + + ASSERT(mutex_owned(bgep->genlock)); + + bge_mii_put16(bgep, MII_DSP_ADDRESS, regno); + return bge_mii_get16(bgep, MII_DSP_RW_PORT); +} + +#pragma no_inline(bge_phydsp_write) + +void +bge_phydsp_write(bge_t *bgep, bge_regno_t regno, uint16_t data) +{ + BGE_TRACE(("bge_phydsp_write($%p, 
0x%lx, 0x%x)", + (void *)bgep, regno, data)); + + ASSERT(mutex_owned(bgep->genlock)); + + bge_mii_put16(bgep, MII_DSP_ADDRESS, regno); + bge_mii_put16(bgep, MII_DSP_RW_PORT, data); +} + #undef BGE_DBG #define BGE_DBG BGE_DBG_SEEPROM /* debug flag for this code */ @@ -1720,6 +1799,7 @@ bge_nvmem_rw32(bge_t *bgep, uint32_t cmd, bge_regno_t addr, uint32_t *dp) if (DEVICE_5721_SERIES_CHIPSETS(bgep) || DEVICE_5723_SERIES_CHIPSETS(bgep) || DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep) || DEVICE_5714_SERIES_CHIPSETS(bgep)) { bge_reg_set32(bgep, NVM_ACCESS_REG, NVM_ACCESS_ENABLE); @@ -1729,6 +1809,7 @@ bge_nvmem_rw32(bge_t *bgep, uint32_t cmd, bge_regno_t addr, uint32_t *dp) if (DEVICE_5721_SERIES_CHIPSETS(bgep) || DEVICE_5723_SERIES_CHIPSETS(bgep) || DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep) || DEVICE_5714_SERIES_CHIPSETS(bgep)) { bge_reg_clr32(bgep, NVM_ACCESS_REG, NVM_ACCESS_ENABLE); @@ -1739,6 +1820,7 @@ bge_nvmem_rw32(bge_t *bgep, uint32_t cmd, bge_regno_t addr, uint32_t *dp) if (DEVICE_5721_SERIES_CHIPSETS(bgep) || DEVICE_5723_SERIES_CHIPSETS(bgep) || DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep) || DEVICE_5714_SERIES_CHIPSETS(bgep)) { bge_reg_set32(bgep, NVM_ACCESS_REG, NVM_WRITE_ENABLE|NVM_ACCESS_ENABLE); @@ -1750,6 +1832,7 @@ bge_nvmem_rw32(bge_t *bgep, uint32_t cmd, bge_regno_t addr, uint32_t *dp) if (DEVICE_5721_SERIES_CHIPSETS(bgep) || DEVICE_5723_SERIES_CHIPSETS(bgep) || DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep) || DEVICE_5714_SERIES_CHIPSETS(bgep)) { bge_reg_clr32(bgep, NVM_ACCESS_REG, NVM_WRITE_ENABLE|NVM_ACCESS_ENABLE); @@ -1768,6 +1851,42 @@ bge_nvmem_rw32(bge_t *bgep, uint32_t cmd, bge_regno_t addr, uint32_t *dp) return (err); } +static uint32_t +bge_nvmem_access_cmd(bge_t *bgep, boolean_t read) +{ + switch (bgep->chipid.nvtype) { + case BGE_NVTYPE_NONE: + case BGE_NVTYPE_UNKNOWN: + default: + return 0; + + case BGE_NVTYPE_SEEPROM: + case BGE_NVTYPE_LEGACY_SEEPROM: + return (read ? BGE_SEE_READ : BGE_SEE_WRITE); + + case BGE_NVTYPE_UNBUFFERED_FLASH: + case BGE_NVTYPE_BUFFERED_FLASH: + return (read ? 
BGE_FLASH_READ : BGE_FLASH_WRITE); + } +} + + +int +bge_nvmem_read32(bge_t *bgep, bge_regno_t addr, uint32_t *dp) +{ + return (bge_nvmem_rw32(bgep, bge_nvmem_access_cmd(bgep, B_TRUE), + addr, dp)); +} + + +int +bge_nvmem_write32(bge_t *bgep, bge_regno_t addr, uint32_t *dp) +{ + return (bge_nvmem_rw32(bgep, bge_nvmem_access_cmd(bgep, B_FALSE), + addr, dp)); +} + + /* * Attempt to get a MAC address from the SEEPROM or Flash, if any */ @@ -1896,7 +2015,11 @@ bge_nvmem_id(bge_t *bgep) case DEVICE_ID_5705_2: case DEVICE_ID_5717: case DEVICE_ID_5718: + case DEVICE_ID_5719: + case DEVICE_ID_5720: case DEVICE_ID_5724: + case DEVICE_ID_5725: + case DEVICE_ID_5727: case DEVICE_ID_57780: case DEVICE_ID_5780: case DEVICE_ID_5782: @@ -1942,6 +2065,453 @@ bge_nvmem_id(bge_t *bgep) } #undef BGE_DBG +#define BGE_DBG BGE_DBG_APE /* debug flag for this code */ + +uint32_t bge_ape_get32(bge_t *bgep, bge_regno_t regno); +#pragma inline(bge_ape_get32) + +uint32_t +bge_ape_get32(bge_t *bgep, bge_regno_t regno) +{ + BGE_TRACE(("bge_ape_get32($%p, 0x%lx)", + (void *)bgep, regno)); + + return (ddi_get32(bgep->ape_handle, APE_ADDR(bgep, regno))); +} + +void bge_ape_put32(bge_t *bgep, bge_regno_t regno, uint32_t data); +#pragma inline(bge_ape_put32) + +void +bge_ape_put32(bge_t *bgep, bge_regno_t regno, uint32_t data) +{ + BGE_TRACE(("bge_ape_put32($%p, 0x%lx, 0x%x)", + (void *)bgep, regno, data)); + + ddi_put32(bgep->ape_handle, APE_ADDR(bgep, regno), data); + BGE_PCICHK(bgep); +} + +void +bge_ape_lock_init(bge_t *bgep) +{ + int i; + uint32_t regbase; + uint32_t bit; + + BGE_TRACE(("bge_ape_lock_init($%p)", (void *)bgep)); + + if (bgep->chipid.device == DEVICE_ID_5761) + regbase = BGE_APE_LOCK_GRANT; + else + regbase = BGE_APE_PER_LOCK_GRANT; + + /* Make sure the driver hasn't any stale locks. */ + for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) { + switch (i) { + case BGE_APE_LOCK_PHY0: + case BGE_APE_LOCK_PHY1: + case BGE_APE_LOCK_PHY2: + case BGE_APE_LOCK_PHY3: + bit = APE_LOCK_GRANT_DRIVER; + break; + default: + if (!bgep->pci_func) + bit = APE_LOCK_GRANT_DRIVER; + else + bit = 1 << bgep->pci_func; + } + bge_ape_put32(bgep, regbase + 4 * i, bit); + } +} + +static int +bge_ape_lock(bge_t *bgep, int locknum) +{ + int i, off; + int ret = 0; + uint32_t status; + uint32_t req; + uint32_t gnt; + uint32_t bit; + + BGE_TRACE(("bge_ape_lock($%p, 0x%x)", (void *)bgep, locknum)); + + if (!bgep->ape_enabled) + return (0); + + switch (locknum) { + case BGE_APE_LOCK_GPIO: + if (bgep->chipid.device == DEVICE_ID_5761) + return (0); + case BGE_APE_LOCK_GRC: + case BGE_APE_LOCK_MEM: + if (!bgep->pci_func) + bit = APE_LOCK_REQ_DRIVER; + else + bit = 1 << bgep->pci_func; + break; + case BGE_APE_LOCK_PHY0: + case BGE_APE_LOCK_PHY1: + case BGE_APE_LOCK_PHY2: + case BGE_APE_LOCK_PHY3: + bit = APE_LOCK_REQ_DRIVER; + break; + default: + return (-1); + } + + if (bgep->chipid.device == DEVICE_ID_5761) { + req = BGE_APE_LOCK_REQ; + gnt = BGE_APE_LOCK_GRANT; + } else { + req = BGE_APE_PER_LOCK_REQ; + gnt = BGE_APE_PER_LOCK_GRANT; + } + + off = 4 * locknum; + + bge_ape_put32(bgep, req + off, bit); + + /* Wait for up to 1 millisecond to acquire lock. */ + for (i = 0; i < 100; i++) { + status = bge_ape_get32(bgep, gnt + off); + if (status == bit) + break; + drv_usecwait(10); + } + + if (status != bit) { + /* Revoke the lock request. 
*/ + bge_ape_put32(bgep, gnt + off, bit); + ret = -1; + } + + return (ret); +} + +static void +bge_ape_unlock(bge_t *bgep, int locknum) +{ + uint32_t gnt; + uint32_t bit; + + BGE_TRACE(("bge_ape_unlock($%p, 0x%x)", (void *)bgep, locknum)); + + if (!bgep->ape_enabled) + return; + + switch (locknum) { + case BGE_APE_LOCK_GPIO: + if (bgep->chipid.device == DEVICE_ID_5761) + return; + case BGE_APE_LOCK_GRC: + case BGE_APE_LOCK_MEM: + if (!bgep->pci_func) + bit = APE_LOCK_GRANT_DRIVER; + else + bit = 1 << bgep->pci_func; + break; + case BGE_APE_LOCK_PHY0: + case BGE_APE_LOCK_PHY1: + case BGE_APE_LOCK_PHY2: + case BGE_APE_LOCK_PHY3: + bit = APE_LOCK_GRANT_DRIVER; + break; + default: + return; + } + + if (bgep->chipid.device == DEVICE_ID_5761) + gnt = BGE_APE_LOCK_GRANT; + else + gnt = BGE_APE_PER_LOCK_GRANT; + + bge_ape_put32(bgep, gnt + 4 * locknum, bit); +} + +/* wait for pending event to finish, if successful returns with MEM locked */ +static int +bge_ape_event_lock(bge_t *bgep, uint32_t timeout_us) +{ + uint32_t apedata; + + BGE_TRACE(("bge_ape_event_lock($%p, %d)", (void *)bgep, timeout_us)); + + ASSERT(timeout_us > 0); + + while (timeout_us) { + if (bge_ape_lock(bgep, BGE_APE_LOCK_MEM)) + return (-1); + + apedata = bge_ape_get32(bgep, BGE_APE_EVENT_STATUS); + if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) + break; + + bge_ape_unlock(bgep, BGE_APE_LOCK_MEM); + + drv_usecwait(10); + timeout_us -= (timeout_us > 10) ? 10 : timeout_us; + } + + return (timeout_us ? 0 : -1); +} + +/* wait for pending event to finish, returns non-zero if not finished */ +static int +bge_ape_wait_for_event(bge_t *bgep, uint32_t timeout_us) +{ + uint32_t i; + uint32_t apedata; + + BGE_TRACE(("bge_ape_wait_for_event($%p, %d)", (void *)bgep, timeout_us)); + + ASSERT(timeout_us > 0); + + for (i = 0; i < timeout_us / 10; i++) { + apedata = bge_ape_get32(bgep, BGE_APE_EVENT_STATUS); + + if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) + break; + + drv_usecwait(10); + } + + return (i == timeout_us / 10); +} + +int +bge_ape_scratchpad_read(bge_t *bgep, uint32_t *data, uint32_t base_off, + uint32_t lenToRead) +{ + int err; + uint32_t i; + uint32_t bufoff; + uint32_t msgoff; + uint32_t maxlen; + uint32_t apedata; + + BGE_TRACE(("bge_ape_scratchpad_read($%p, %p, 0x%0x, %d)", + (void *)bgep, (void*)data, base_off, lenToRead)); + + if (!bgep->ape_has_ncsi) + return (0); + + apedata = bge_ape_get32(bgep, BGE_APE_SEG_SIG); + if (apedata != APE_SEG_SIG_MAGIC) + return (-1); + + apedata = bge_ape_get32(bgep, BGE_APE_FW_STATUS); + if (!(apedata & APE_FW_STATUS_READY)) + return (-1); + + bufoff = (bge_ape_get32(bgep, BGE_APE_SEG_MSG_BUF_OFF) + + BGE_APE_SHMEM_BASE); + msgoff = bufoff + 2 * sizeof(uint32_t); + maxlen = bge_ape_get32(bgep, BGE_APE_SEG_MSG_BUF_LEN); + + while (lenToRead) { + uint32_t transferLen; + + /* Cap xfer sizes to scratchpad limits. */ + transferLen = (lenToRead > maxlen) ? maxlen : lenToRead; + lenToRead -= transferLen; + + apedata = bge_ape_get32(bgep, BGE_APE_FW_STATUS); + if (!(apedata & APE_FW_STATUS_READY)) + return (-1); + + /* Wait for up to 1 millisecond for APE to service previous event. 
*/ + err = bge_ape_event_lock(bgep, 1000); + if (err) + return (err); + + apedata = (APE_EVENT_STATUS_DRIVER_EVNT | + APE_EVENT_STATUS_SCRTCHPD_READ | + APE_EVENT_STATUS_EVENT_PENDING); + bge_ape_put32(bgep, BGE_APE_EVENT_STATUS, apedata); + + bge_ape_put32(bgep, bufoff, base_off); + bge_ape_put32(bgep, bufoff + sizeof(uint32_t), transferLen); + + bge_ape_unlock(bgep, BGE_APE_LOCK_MEM); + bge_ape_put32(bgep, BGE_APE_EVENT, APE_EVENT_1); + + base_off += transferLen; + + if (bge_ape_wait_for_event(bgep, 30000)) + return (-1); + + for (i = 0; transferLen; i += 4, transferLen -= 4) { + uint32_t val = bge_ape_get32(bgep, msgoff + i); + memcpy(data, &val, sizeof(uint32_t)); + data++; + } + } + + return (0); +} + +int +bge_ape_scratchpad_write(bge_t *bgep, uint32_t dstoff, uint32_t *data, + uint32_t lenToWrite) +{ + int err; + uint32_t i; + uint32_t bufoff; + uint32_t msgoff; + uint32_t maxlen; + uint32_t apedata; + + BGE_TRACE(("bge_ape_scratchpad_write($%p, %d, %p, %d)", + (void *)bgep, dstoff, data, lenToWrite)); + + if (!bgep->ape_has_ncsi) + return (0); + + apedata = bge_ape_get32(bgep, BGE_APE_SEG_SIG); + if (apedata != APE_SEG_SIG_MAGIC) + return (-1); + + apedata = bge_ape_get32(bgep, BGE_APE_FW_STATUS); + if (!(apedata & APE_FW_STATUS_READY)) + return (-1); + + bufoff = (bge_ape_get32(bgep, BGE_APE_SEG_MSG_BUF_OFF) + + BGE_APE_SHMEM_BASE); + msgoff = bufoff + 2 * sizeof(uint32_t); + maxlen = bge_ape_get32(bgep, BGE_APE_SEG_MSG_BUF_LEN); + + while (lenToWrite) { + uint32_t transferLen; + + /* Cap xfer sizes to scratchpad limits. */ + transferLen = (lenToWrite > maxlen) ? maxlen : lenToWrite; + lenToWrite -= transferLen; + + /* Wait for up to 1 millisecond for + * APE to service previous event. + */ + err = bge_ape_event_lock(bgep, 1000); + if (err) + return (err); + + bge_ape_put32(bgep, bufoff, dstoff); + bge_ape_put32(bgep, bufoff + sizeof(uint32_t), transferLen); + apedata = msgoff; + + dstoff += transferLen; + + for (i = 0; transferLen; i += 4, transferLen -= 4) { + bge_ape_put32(bgep, apedata, *data++); + apedata += sizeof(uint32_t); + } + + apedata = (APE_EVENT_STATUS_DRIVER_EVNT | + APE_EVENT_STATUS_SCRTCHPD_WRITE | + APE_EVENT_STATUS_EVENT_PENDING); + bge_ape_put32(bgep, BGE_APE_EVENT_STATUS, apedata); + + bge_ape_unlock(bgep, BGE_APE_LOCK_MEM); + bge_ape_put32(bgep, BGE_APE_EVENT, APE_EVENT_1); + } + + return (0); +} + +static int +bge_ape_send_event(bge_t *bgep, uint32_t event) +{ + int err; + uint32_t apedata; + + BGE_TRACE(("bge_ape_send_event($%p, %d)", (void *)bgep, event)); + + apedata = bge_ape_get32(bgep, BGE_APE_SEG_SIG); + if (apedata != APE_SEG_SIG_MAGIC) + return (-1); + + apedata = bge_ape_get32(bgep, BGE_APE_FW_STATUS); + if (!(apedata & APE_FW_STATUS_READY)) + return (-1); + + /* Wait for up to 1 millisecond for APE to service previous event. 
*/ + err = bge_ape_event_lock(bgep, 1000); + if (err) + return (err); + + bge_ape_put32(bgep, BGE_APE_EVENT_STATUS, + event | APE_EVENT_STATUS_EVENT_PENDING); + + bge_ape_unlock(bgep, BGE_APE_LOCK_MEM); + bge_ape_put32(bgep, BGE_APE_EVENT, APE_EVENT_1); + + return 0; +} + +static void +bge_ape_driver_state_change(bge_t *bgep, int mode) +{ + uint32_t event; + uint32_t apedata; + + BGE_TRACE(("bge_ape_driver_state_change($%p, %d)", + (void *)bgep, mode)); + + if (!bgep->ape_enabled) + return; + + switch (mode) { + case BGE_INIT_RESET: + bge_ape_put32(bgep, BGE_APE_HOST_SEG_SIG, + APE_HOST_SEG_SIG_MAGIC); + bge_ape_put32(bgep, BGE_APE_HOST_SEG_LEN, + APE_HOST_SEG_LEN_MAGIC); + apedata = bge_ape_get32(bgep, BGE_APE_HOST_INIT_COUNT); + bge_ape_put32(bgep, BGE_APE_HOST_INIT_COUNT, ++apedata); + bge_ape_put32(bgep, BGE_APE_HOST_DRIVER_ID, + APE_HOST_DRIVER_ID_MAGIC(1, 0)); + bge_ape_put32(bgep, BGE_APE_HOST_BEHAVIOR, + APE_HOST_BEHAV_NO_PHYLOCK); + bge_ape_put32(bgep, BGE_APE_HOST_DRVR_STATE, + BGE_APE_HOST_DRVR_STATE_START); + + event = APE_EVENT_STATUS_STATE_START; + break; + case BGE_SHUTDOWN_RESET: + /* With the interface we are currently using, + * APE does not track driver state. Wiping + * out the HOST SEGMENT SIGNATURE forces + * the APE to assume OS absent status. + */ + bge_ape_put32(bgep, BGE_APE_HOST_SEG_SIG, 0x0); + +#if 0 + if (WOL supported) { + bge_ape_put32(bgep, BGE_APE_HOST_WOL_SPEED, + BGE_APE_HOST_WOL_SPEED_AUTO); + apedata = BGE_APE_HOST_DRVR_STATE_WOL; + } else +#endif + apedata = BGE_APE_HOST_DRVR_STATE_UNLOAD; + + bge_ape_put32(bgep, BGE_APE_HOST_DRVR_STATE, apedata); + + event = APE_EVENT_STATUS_STATE_UNLOAD; + break; + case BGE_SUSPEND_RESET: + event = APE_EVENT_STATUS_STATE_SUSPEND; + break; + default: + return; + } + + event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE; + + bge_ape_send_event(bgep, event); +} + +#undef BGE_DBG #define BGE_DBG BGE_DBG_CHIP /* debug flag for this code */ static void @@ -1973,7 +2543,6 @@ int bge_chip_id_init(bge_t *bgep) { char buf[MAXPATHLEN]; /* any risk of stack overflow? 
*/ - boolean_t sys_ok; boolean_t dev_ok; chip_id_t *cidp; uint32_t subid; @@ -1983,7 +2552,7 @@ bge_chip_id_init(bge_t *bgep) int err; uint_t i; - sys_ok = dev_ok = B_FALSE; + dev_ok = B_FALSE; cidp = &bgep->chipid; /* @@ -2026,13 +2595,31 @@ bge_chip_id_init(bge_t *bgep) switch (cidp->device) { case DEVICE_ID_5717: case DEVICE_ID_5718: + case DEVICE_ID_5719: + case DEVICE_ID_5720: case DEVICE_ID_5724: - if (cidp->device == DEVICE_ID_5717) + case DEVICE_ID_5725: + case DEVICE_ID_5727: + if (cidp->device == DEVICE_ID_5717) { cidp->chip_label = 5717; - else if (cidp->device == DEVICE_ID_5718) + } else if (cidp->device == DEVICE_ID_5718) { cidp->chip_label = 5718; - else + } else if (cidp->device == DEVICE_ID_5719) { + cidp->chip_label = 5719; + } else if (cidp->device == DEVICE_ID_5720) { + if (pci_config_get16(bgep->cfg_handle, PCI_CONF_DEVID) == + DEVICE_ID_5717_C0) { + cidp->chip_label = 5717; + } else { + cidp->chip_label = 5720; + } + } else if (cidp->device == DEVICE_ID_5724) { cidp->chip_label = 5724; + } else if (cidp->device == DEVICE_ID_5725) { + cidp->chip_label = 5725; + } else /* (cidp->device == DEVICE_ID_5727) */ { + cidp->chip_label = 5727; + } cidp->msi_enabled = bge_enable_msi; #ifdef __sparc cidp->mask_pci_int = LE_32(MHCR_MASK_PCI_INT_OUTPUT); @@ -2048,7 +2635,6 @@ bge_chip_id_init(bge_t *bgep) cidp->bge_mlcr_default = MLCR_DEFAULT_5717; cidp->rx_rings = BGE_RECV_RINGS_MAX_5705; cidp->tx_rings = BGE_SEND_RINGS_MAX_5705; - cidp->flags |= CHIP_FLAG_NO_JUMBO; cidp->statistic_type = BGE_STAT_REG; dev_ok = B_TRUE; break; @@ -2451,10 +3037,15 @@ bge_chip_id_init(bge_t *bgep) * For BCM5714/5715, there is only one standard receive ring. So the * std buffer size should be set to BGE_JUMBO_BUFF_SIZE when jumbo * feature is enabled. + * + * For the BCM5718 family we hijack the standard receive ring for + * the jumboframe traffic, keeps it simple. */ if (!(cidp->flags & CHIP_FLAG_NO_JUMBO) && (cidp->default_mtu > BGE_DEFAULT_MTU)) { - if (DEVICE_5714_SERIES_CHIPSETS(bgep)) { + if (DEVICE_5714_SERIES_CHIPSETS(bgep) || + DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) { cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5714_JUMBO; cidp->mbuf_lo_water_rmac = @@ -2482,57 +3073,6 @@ bge_chip_id_init(bge_t *bgep) cidp->nvtype = bge_nvmem_id(bgep); /* - * Now, we want to check whether this device is part of a - * supported subsystem (e.g., on the motherboard of a Sun - * branded platform). - * - * Rule 1: If the Subsystem Vendor ID is "Sun", then it's OK ;-) - */ - if (cidp->subven == VENDOR_ID_SUN) - sys_ok = B_TRUE; - - /* - * Rule 2: If it's on the list on known subsystems, then it's OK. - * Note: 0x14e41647 should *not* appear in the list, but the code - * doesn't enforce that. - */ - err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo, - DDI_PROP_DONTPASS, knownids_propname, &ids, &i); - if (err == DDI_PROP_SUCCESS) { - /* - * Got the list; scan for a matching subsystem vendor/device - */ - subid = (cidp->subven << 16) | cidp->subdev; - while (i--) - if (ids[i] == subid) - sys_ok = B_TRUE; - ddi_prop_free(ids); - } - - /* - * Rule 3: If it's a Taco/ENWS motherboard device, then it's OK - * - * Unfortunately, early SunBlade 1500s and 2500s didn't reprogram - * the Subsystem Vendor ID, so it defaults to Broadcom. 
Therefore, - * we have to check specially for the exact device paths to the - * motherboard devices on those platforms ;-( - * - * Note: we can't just use the "supported-subsystems" mechanism - * above, because the entry would have to be 0x14e41647 -- which - * would then accept *any* plugin card that *didn't* contain a - * (valid) SEEPROM ;-( - */ - sysname = ddi_node_name(ddi_root_node()); - devname = ddi_pathname(bgep->devinfo, buf); - ASSERT(strlen(devname) > 0); - if (strcmp(sysname, "SUNW,Sun-Blade-1500") == 0) /* Taco */ - if (strcmp(devname, "/pci@1f,700000/network@2") == 0) - sys_ok = B_TRUE; - if (strcmp(sysname, "SUNW,Sun-Blade-2500") == 0) /* ENWS */ - if (strcmp(devname, "/pci@1c,600000/network@3") == 0) - sys_ok = B_TRUE; - - /* * Now check what we've discovered: is this truly a supported * chip on (the motherboard of) a supported platform? * @@ -2551,16 +3091,12 @@ bge_chip_id_init(bge_t *bgep) "Device 'pci%04x,%04x' (%d) revision %d not supported", cidp->vendor, cidp->device, cidp->chip_label, cidp->revision); -#if BGE_DEBUGGING - else if (!sys_ok) - bge_problem(bgep, - "%d-based subsystem 'pci%04x,%04x' not validated", - cidp->chip_label, cidp->subven, cidp->subdev); -#endif else cidp->flags |= CHIP_FLAG_SUPPORTED; + if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) return (EIO); + return (0); } @@ -2643,7 +3179,9 @@ static boolean_t bge_chip_reset_engine(bge_t *bgep, bge_regno_t regno) { uint32_t regval; + uint16_t val16; uint32_t val32; + uint32_t mhcr; regval = bge_reg_get32(bgep, regno); @@ -2663,11 +3201,12 @@ bge_chip_reset_engine(bge_t *bgep, bge_regno_t regno) * while the reset bit is written. * See:P500 of 57xx-PG102-RDS.pdf. */ - if (DEVICE_5705_SERIES_CHIPSETS(bgep)|| - DEVICE_5717_SERIES_CHIPSETS(bgep)|| - DEVICE_5721_SERIES_CHIPSETS(bgep)|| - DEVICE_5723_SERIES_CHIPSETS(bgep)|| - DEVICE_5714_SERIES_CHIPSETS(bgep)|| + if (DEVICE_5705_SERIES_CHIPSETS(bgep) || + DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep) || + DEVICE_5721_SERIES_CHIPSETS(bgep) || + DEVICE_5723_SERIES_CHIPSETS(bgep) || + DEVICE_5714_SERIES_CHIPSETS(bgep) || DEVICE_5906_SERIES_CHIPSETS(bgep)) { regval |= MISC_CONFIG_GPHY_POWERDOWN_OVERRIDE; if (bgep->chipid.pci_type == BGE_PCI_E) { @@ -2727,6 +3266,14 @@ bge_chip_reset_engine(bge_t *bgep, bge_regno_t regno) /* PCI-E device need more reset time */ drv_usecwait(120000); + /* + * (re)Disable interrupts as the bit can be reset after a + * core clock reset. + */ + mhcr = pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_MHCR); + pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, + mhcr | MHCR_MASK_PCI_INT_OUTPUT); + /* Set PCIE max payload size and clear error status. 
*/ if ((bgep->chipid.chip_label == 5721) || (bgep->chipid.chip_label == 5751) || @@ -2746,6 +3293,16 @@ bge_chip_reset_engine(bge_t *bgep, bge_regno_t regno) pci_config_put16(bgep->cfg_handle, PCI_CONF_DEV_STUS_5723, DEVICE_ERROR_STUS); } + + if (DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) { + val16 = pci_config_get16(bgep->cfg_handle, + PCI_CONF_DEV_CTRL_5717); + val16 &= ~READ_REQ_SIZE_MASK; + val16 |= READ_REQ_SIZE_2K; + pci_config_put16(bgep->cfg_handle, + PCI_CONF_DEV_CTRL_5717, val16); + } } BGE_PCICHK(bgep); @@ -2802,13 +3359,27 @@ bge_chip_disable_engine(bge_t *bgep, bge_regno_t regno, uint32_t morebits) return (B_TRUE); default: - regval = bge_reg_get32(bgep, regno); - regval &= ~STATE_MACHINE_ENABLE_BIT; - regval &= ~morebits; - bge_reg_put32(bgep, regno, regval); - return (bge_chip_poll_engine(bgep, regno, - STATE_MACHINE_ENABLE_BIT, 0)); + if (DEVICE_5704_SERIES_CHIPSETS(bgep)) { + break; + } + + if ((regno == RCV_LIST_SELECTOR_MODE_REG) || + (regno == DMA_COMPLETION_MODE_REG) || + (regno == MBUF_CLUSTER_FREE_MODE_REG) || + (regno == BUFFER_MANAGER_MODE_REG) || + (regno == MEMORY_ARBITER_MODE_REG)) { + return B_TRUE; + } + + break; } + + regval = bge_reg_get32(bgep, regno); + regval &= ~STATE_MACHINE_ENABLE_BIT; + regval &= ~morebits; + bge_reg_put32(bgep, regno, regval); + + return bge_chip_poll_engine(bgep, regno, STATE_MACHINE_ENABLE_BIT, 0); } /* @@ -2880,18 +3451,13 @@ bge_sync_mac_modes(bge_t *bgep) * Reprogram the Ethernet MAC mode ... */ macmode = regval = bge_reg_get32(bgep, ETHERNET_MAC_MODE_REG); - if ((bgep->chipid.flags & CHIP_FLAG_SERDES) && - (bgep->param_loop_mode != BGE_LOOP_INTERNAL_MAC)) - if (DEVICE_5714_SERIES_CHIPSETS(bgep)) - macmode |= ETHERNET_MODE_LINK_POLARITY; - else - macmode &= ~ETHERNET_MODE_LINK_POLARITY; - else - macmode |= ETHERNET_MODE_LINK_POLARITY; + macmode &= ~ETHERNET_MODE_LINK_POLARITY; macmode &= ~ETHERNET_MODE_PORTMODE_MASK; if ((bgep->chipid.flags & CHIP_FLAG_SERDES) && (bgep->param_loop_mode != BGE_LOOP_INTERNAL_MAC)) { - if (DEVICE_5714_SERIES_CHIPSETS(bgep)) + if (DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep) || + DEVICE_5714_SERIES_CHIPSETS(bgep)) macmode |= ETHERNET_MODE_PORTMODE_GMII; else macmode |= ETHERNET_MODE_PORTMODE_TBI; @@ -3102,38 +3668,6 @@ bge_chip_sync(bge_t *bgep) return (retval); } -/* - * This array defines the sequence of state machine control registers - * in which the <enable> bit must be cleared to bring the chip to a - * clean stop. Taken from Broadcom document 570X-PG102-R, p116. 
- */ -static bge_regno_t shutdown_engine_regs[] = { - RECEIVE_MAC_MODE_REG, - RCV_BD_INITIATOR_MODE_REG, - RCV_LIST_PLACEMENT_MODE_REG, - RCV_LIST_SELECTOR_MODE_REG, /* BCM5704 series only */ - RCV_DATA_BD_INITIATOR_MODE_REG, - RCV_DATA_COMPLETION_MODE_REG, - RCV_BD_COMPLETION_MODE_REG, - - SEND_BD_SELECTOR_MODE_REG, - SEND_BD_INITIATOR_MODE_REG, - SEND_DATA_INITIATOR_MODE_REG, - READ_DMA_MODE_REG, - SEND_DATA_COMPLETION_MODE_REG, - DMA_COMPLETION_MODE_REG, /* BCM5704 series only */ - SEND_BD_COMPLETION_MODE_REG, - TRANSMIT_MAC_MODE_REG, - - HOST_COALESCE_MODE_REG, - WRITE_DMA_MODE_REG, - MBUF_CLUSTER_FREE_MODE_REG, /* BCM5704 series only */ - FTQ_RESET_REG, /* special - see code */ - BUFFER_MANAGER_MODE_REG, /* BCM5704 series only */ - MEMORY_ARBITER_MODE_REG, /* BCM5704 series only */ - BGE_REGNO_NONE /* terminator */ -}; - #ifndef __sparc static bge_regno_t quiesce_regs[] = { READ_DMA_MODE_REG, @@ -3185,30 +3719,44 @@ bge_chip_stop(bge_t *bgep, boolean_t fault) { bge_regno_t regno; bge_regno_t *rbp; - boolean_t ok; + boolean_t ok = B_TRUE; BGE_TRACE(("bge_chip_stop($%p)", (void *)bgep)); ASSERT(mutex_owned(bgep->genlock)); - rbp = shutdown_engine_regs; - /* - * When driver try to shutdown the BCM5705/5788/5721/5751/ - * 5752/5714 and 5715 chipsets,the buffer manager and the mem - * -ory arbiter should not be disabled. - */ - for (ok = B_TRUE; (regno = *rbp) != BGE_REGNO_NONE; ++rbp) { - if (DEVICE_5704_SERIES_CHIPSETS(bgep)) - ok &= bge_chip_disable_engine(bgep, regno, 0); - else if ((regno != RCV_LIST_SELECTOR_MODE_REG) && - (regno != DMA_COMPLETION_MODE_REG) && - (regno != MBUF_CLUSTER_FREE_MODE_REG)&& - (regno != BUFFER_MANAGER_MODE_REG) && - (regno != MEMORY_ARBITER_MODE_REG)) - ok &= bge_chip_disable_engine(bgep, - regno, 0); - } + pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, + (pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_MHCR) | + MHCR_MASK_PCI_INT_OUTPUT)); + + ok &= bge_chip_disable_engine(bgep, RECEIVE_MAC_MODE_REG, 0); + ok &= bge_chip_disable_engine(bgep, RCV_BD_INITIATOR_MODE_REG, 0); + ok &= bge_chip_disable_engine(bgep, RCV_LIST_PLACEMENT_MODE_REG, 0); + ok &= bge_chip_disable_engine(bgep, RCV_LIST_SELECTOR_MODE_REG, 0); + ok &= bge_chip_disable_engine(bgep, RCV_DATA_BD_INITIATOR_MODE_REG, 0); + ok &= bge_chip_disable_engine(bgep, RCV_DATA_COMPLETION_MODE_REG, 0); + ok &= bge_chip_disable_engine(bgep, RCV_BD_COMPLETION_MODE_REG, 0); + + ok &= bge_chip_disable_engine(bgep, SEND_BD_SELECTOR_MODE_REG, 0); + ok &= bge_chip_disable_engine(bgep, SEND_BD_INITIATOR_MODE_REG, 0); + ok &= bge_chip_disable_engine(bgep, SEND_DATA_INITIATOR_MODE_REG, 0); + ok &= bge_chip_disable_engine(bgep, READ_DMA_MODE_REG, 0); + ok &= bge_chip_disable_engine(bgep, SEND_DATA_COMPLETION_MODE_REG, 0); + ok &= bge_chip_disable_engine(bgep, DMA_COMPLETION_MODE_REG, 0); + ok &= bge_chip_disable_engine(bgep, SEND_BD_COMPLETION_MODE_REG, 0); + ok &= bge_chip_disable_engine(bgep, TRANSMIT_MAC_MODE_REG, 0); + + bge_reg_clr32(bgep, ETHERNET_MAC_MODE_REG, ETHERNET_MODE_ENABLE_TDE); + drv_usecwait(40); + + ok &= bge_chip_disable_engine(bgep, HOST_COALESCE_MODE_REG, 0); + ok &= bge_chip_disable_engine(bgep, WRITE_DMA_MODE_REG, 0); + ok &= bge_chip_disable_engine(bgep, MBUF_CLUSTER_FREE_MODE_REG, 0); + ok &= bge_chip_disable_engine(bgep, FTQ_RESET_REG, 0); + ok &= bge_chip_disable_engine(bgep, BUFFER_MANAGER_MODE_REG, 0); + ok &= bge_chip_disable_engine(bgep, MEMORY_ARBITER_MODE_REG, 0); + ok &= bge_chip_disable_engine(bgep, MEMORY_ARBITER_MODE_REG, 0); if (!ok && !fault) 
ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED); @@ -3295,8 +3843,7 @@ bge_poll_firmware(bge_t *bgep) * GENCOMM word as "the upper half of a 64-bit quantity" makes * it work correctly on both big- and little-endian hosts. */ - if (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) == - MHCR_CHIP_ASIC_REV_5906) { + if (MHCR_CHIP_ASIC_REV(bgep) == MHCR_CHIP_ASIC_REV_5906) { for (i = 0; i < 1000; ++i) { drv_usecwait(1000); val = bge_reg_get32(bgep, VCPU_STATUS_REG); @@ -3310,6 +3857,9 @@ bge_poll_firmware(bge_t *bgep) for (i = 0; i < 1000; ++i) { drv_usecwait(1000); gen = bge_nic_get64(bgep, NIC_MEM_GENCOMM) >> 32; + if (i == 0 && DEVICE_5704_SERIES_CHIPSETS(bgep)) + drv_usecwait(100000); + mac = bge_reg_get64(bgep, MAC_ADDRESS_REG(0)); #ifdef BGE_IPMI_ASF if (!bgep->asf_enabled) { #endif @@ -3318,7 +3868,6 @@ bge_poll_firmware(bge_t *bgep) #ifdef BGE_IPMI_ASF } #endif - mac = bge_reg_get64(bgep, MAC_ADDRESS_REG(0)); if (mac != 0ULL) break; if (bgep->bge_chip_state != BGE_CHIP_INITIAL) @@ -3358,7 +3907,8 @@ bge_chip_reset(bge_t *bgep, boolean_t enable_dma) chip_id_t chipid; uint64_t mac; uint64_t magic; - uint32_t modeflags; + uint32_t tmp; + uint32_t mhcr_base; uint32_t mhcr; uint32_t sx0; uint32_t i, tries; @@ -3395,24 +3945,26 @@ bge_chip_reset(bge_t *bgep, boolean_t enable_dma) break; } + mhcr_base = MHCR_ENABLE_INDIRECT_ACCESS | + MHCR_ENABLE_PCI_STATE_RW | + MHCR_ENABLE_TAGGED_STATUS_MODE | + MHCR_MASK_INTERRUPT_MODE | + MHCR_MASK_PCI_INT_OUTPUT | + MHCR_CLEAR_INTERRUPT_INTA; + #ifdef BGE_IPMI_ASF if (bgep->asf_enabled) { -#ifdef __sparc - mhcr = MHCR_ENABLE_INDIRECT_ACCESS | - MHCR_ENABLE_TAGGED_STATUS_MODE | - MHCR_MASK_INTERRUPT_MODE | - MHCR_MASK_PCI_INT_OUTPUT | - MHCR_CLEAR_INTERRUPT_INTA | - MHCR_ENABLE_ENDIAN_WORD_SWAP | - MHCR_ENABLE_ENDIAN_BYTE_SWAP; - if (DEVICE_5717_SERIES_CHIPSETS(bgep)) - pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, - 0); + mhcr = mhcr_base; +#ifdef _BIG_ENDIAN + mhcr |= (MHCR_ENABLE_ENDIAN_WORD_SWAP | + MHCR_ENABLE_ENDIAN_BYTE_SWAP); +#endif pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcr); + bge_reg_put32(bgep, MEMORY_ARBITER_MODE_REG, bge_reg_get32(bgep, MEMORY_ARBITER_MODE_REG) | MEMORY_ARBITER_ENABLE); -#endif + if (asf_mode == ASF_MODE_INIT) { bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET); } else if (asf_mode == ASF_MODE_SHUTDOWN) { @@ -3420,6 +3972,7 @@ bge_chip_reset(bge_t *bgep, boolean_t enable_dma) } } #endif + /* * Adapted from Broadcom document 570X-PG102-R, pp 102-116. * Updated to reflect Broadcom document 570X-PG104-R, pp 146-159. 
@@ -3434,17 +3987,13 @@ bge_chip_reset(bge_t *bgep, boolean_t enable_dma) if (!bge_chip_enable_engine(bgep, MEMORY_ARBITER_MODE_REG, 0)) retval = DDI_FAILURE; - mhcr = MHCR_ENABLE_INDIRECT_ACCESS | - MHCR_ENABLE_TAGGED_STATUS_MODE | - MHCR_MASK_INTERRUPT_MODE | - MHCR_MASK_PCI_INT_OUTPUT | - MHCR_CLEAR_INTERRUPT_INTA; -#ifdef _BIG_ENDIAN - mhcr |= MHCR_ENABLE_ENDIAN_WORD_SWAP | MHCR_ENABLE_ENDIAN_BYTE_SWAP; -#endif /* _BIG_ENDIAN */ - if (DEVICE_5717_SERIES_CHIPSETS(bgep)) - pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, 0); + mhcr = mhcr_base; +#ifdef _BIG_ENDIAN + mhcr |= (MHCR_ENABLE_ENDIAN_WORD_SWAP | + MHCR_ENABLE_ENDIAN_BYTE_SWAP); +#endif pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcr); + #ifdef BGE_IPMI_ASF if (bgep->asf_enabled) bgep->asf_wordswapped = B_FALSE; @@ -3459,6 +4008,8 @@ bge_chip_reset(bge_t *bgep, boolean_t enable_dma) BGE_DEBUG(("%s: fail to acquire nvram lock", bgep->ifname)); + bge_ape_lock(bgep, BGE_APE_LOCK_GRC); + #ifdef BGE_IPMI_ASF if (!bgep->asf_enabled) { #endif @@ -3468,8 +4019,23 @@ bge_chip_reset(bge_t *bgep, boolean_t enable_dma) } #endif + if (DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) { + bge_reg_set32(bgep, FAST_BOOT_PC, 0); + if (!bge_chip_enable_engine(bgep, MEMORY_ARBITER_MODE_REG, 0)) + retval = DDI_FAILURE; + } + + mhcr = mhcr_base; +#ifdef _BIG_ENDIAN + mhcr |= (MHCR_ENABLE_ENDIAN_WORD_SWAP | + MHCR_ENABLE_ENDIAN_BYTE_SWAP); +#endif + pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcr); + if (!bge_chip_reset_engine(bgep, MISC_CONFIG_REG)) retval = DDI_FAILURE; + bge_chip_cfg_init(bgep, &chipid, enable_dma); /* @@ -3485,7 +4051,6 @@ bge_chip_reset(bge_t *bgep, boolean_t enable_dma) (bgep->chipid.chip_label == 5906)) bge_reg_set32(bgep, TLP_CONTROL_REG, TLP_DATA_FIFO_PROTECT); - /* * Step 9: enable MAC memory arbiter,bit30 and bit31 of 5714/5715 should * not be changed. @@ -3502,17 +4067,15 @@ bge_chip_reset(bge_t *bgep, boolean_t enable_dma) * Steps 14-15: Configure DMA endianness options. See * the comments on the setting of the MHCR above. */ -#ifdef _BIG_ENDIAN - modeflags = MODE_WORD_SWAP_FRAME | MODE_BYTE_SWAP_FRAME | - MODE_WORD_SWAP_NONFRAME | MODE_BYTE_SWAP_NONFRAME; -#else - modeflags = MODE_WORD_SWAP_FRAME | MODE_BYTE_SWAP_FRAME; -#endif /* _BIG_ENDIAN */ + tmp = MODE_WORD_SWAP_FRAME | MODE_BYTE_SWAP_FRAME; +#ifdef _BIG_ENDIAN + tmp |= (MODE_WORD_SWAP_NONFRAME | MODE_BYTE_SWAP_NONFRAME); +#endif #ifdef BGE_IPMI_ASF if (bgep->asf_enabled) - modeflags |= MODE_HOST_STACK_UP; + tmp |= MODE_HOST_STACK_UP; #endif - bge_reg_put32(bgep, MODE_CONTROL_REG, modeflags); + bge_reg_put32(bgep, MODE_CONTROL_REG, tmp); #ifdef BGE_IPMI_ASF if (bgep->asf_enabled) { @@ -3564,11 +4127,20 @@ bge_chip_reset(bge_t *bgep, boolean_t enable_dma) #endif } #endif + + bge_ape_unlock(bgep, BGE_APE_LOCK_GRC); + /* * Steps 16-17: poll for firmware completion */ mac = bge_poll_firmware(bgep); + if (bgep->chipid.device == DEVICE_ID_5720) { + tmp = bge_reg_get32(bgep, CPMU_CLCK_ORIDE_REG); + bge_reg_put32(bgep, CPMU_CLCK_ORIDE_REG, + (tmp & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN)); + } + /* * Step 18: enable external memory -- doesn't apply. 
* @@ -3585,12 +4157,24 @@ bge_chip_reset(bge_t *bgep, boolean_t enable_dma) */ bge_reg_put32(bgep, MISC_LOCAL_CONTROL_REG, bgep->chipid.bge_mlcr_default); + + if ((bgep->chipid.flags & CHIP_FLAG_SERDES) && + DEVICE_5714_SERIES_CHIPSETS(bgep)) { + tmp = bge_reg_get32(bgep, SERDES_RX_CONTROL); + tmp |= SERDES_RX_CONTROL_SIG_DETECT; + bge_reg_put32(bgep, SERDES_RX_CONTROL, tmp); + } + bge_reg_set32(bgep, SERIAL_EEPROM_ADDRESS_REG, SEEPROM_ACCESS_INIT); /* * Step 20: clear the Ethernet MAC mode register */ - bge_reg_put32(bgep, ETHERNET_MAC_MODE_REG, 0); + if (bgep->ape_enabled) + bge_reg_put32(bgep, ETHERNET_MAC_MODE_REG, + ETHERNET_MODE_APE_TX_EN | ETHERNET_MODE_APE_RX_EN); + else + bge_reg_put32(bgep, ETHERNET_MAC_MODE_REG, 0); /* * Step 21: restore cache-line-size, latency timer, and @@ -3702,8 +4286,11 @@ bge_chip_start(bge_t *bgep, boolean_t reset_phys) uint32_t stats_mask; uint32_t dma_wrprio; uint64_t ring; + uint32_t reg; uint32_t regval; + uint32_t mhcr; int retval = DDI_SUCCESS; + int i; BGE_TRACE(("bge_chip_start($%p)", (void *)bgep)); @@ -3711,6 +4298,22 @@ bge_chip_start(bge_t *bgep, boolean_t reset_phys) ASSERT(mutex_owned(bgep->genlock)); ASSERT(bgep->bge_chip_state == BGE_CHIP_RESET); + /* Initialize EEE, enable MAC control of LPI */ + bge_eee_init(bgep); + + if (bgep->ape_enabled) { + /* + * Allow reads and writes to the + * APE register and memory space. + */ + regval = pci_config_get32(bgep->cfg_handle, + PCI_CONF_BGE_PCISTATE); + regval |= PCISTATE_ALLOW_APE_CTLSPC_WR | + PCISTATE_ALLOW_APE_SHMEM_WR | PCISTATE_ALLOW_APE_PSPACE_WR; + pci_config_put32(bgep->cfg_handle, + PCI_CONF_BGE_PCISTATE, regval); + } + /* * Taken from Broadcom document 570X-PG102-R, pp 102-116. * The document specifies 95 separate steps to fully @@ -3746,6 +4349,37 @@ bge_chip_start(bge_t *bgep, boolean_t reset_phys) MODE_HOST_SEND_BDS | MODE_HOST_STACK_UP); + if (DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) { + reg = (CHIP_ASIC_REV(bgep) == CHIP_ASIC_REV_5762) + ? RDMA_RSRV_CTRL_REG2 : RDMA_RSRV_CTRL_REG; + regval = bge_reg_get32(bgep, reg); + if ((bgep->chipid.device == DEVICE_ID_5719) || + (bgep->chipid.device == DEVICE_ID_5720) || + (CHIP_ASIC_REV(bgep) == CHIP_ASIC_REV_5762)) { + regval &= ~(RDMA_RSRV_CTRL_TXMRGN_MASK | + RDMA_RSRV_CTRL_FIFO_LWM_MASK | + RDMA_RSRV_CTRL_FIFO_HWM_MASK); + regval |= (RDMA_RSRV_CTRL_TXMRGN_320B | + RDMA_RSRV_CTRL_FIFO_LWM_1_5K | + RDMA_RSRV_CTRL_FIFO_HWM_1_5K); + } + /* Enable the DMA FIFO Overrun fix. */ + bge_reg_put32(bgep, reg, + (regval | RDMA_RSRV_CTRL_FIFO_OFLW_FIX)); + + if ((CHIP_ASIC_REV(bgep) == CHIP_ASIC_REV_5719) || + (CHIP_ASIC_REV(bgep) == CHIP_ASIC_REV_5720) || + (CHIP_ASIC_REV(bgep) == CHIP_ASIC_REV_5762)) { + reg = (CHIP_ASIC_REV(bgep) == CHIP_ASIC_REV_5762) + ? RDMA_CORR_CTRL_REG2 : RDMA_CORR_CTRL_REG; + regval = bge_reg_get32(bgep, reg); + bge_reg_put32(bgep, reg, (regval | + RDMA_CORR_CTRL_BLEN_BD_4K | + RDMA_CORR_CTRL_BLEN_LSO_4K)); + } + } + /* * Step 28: Configure checksum options: * Solaris supports the hardware default checksum options. 
@@ -3818,16 +4452,23 @@ bge_chip_start(bge_t *bgep, boolean_t reset_phys) /* * Steps 34-36: enable buffer manager & internal h/w queues */ - if (!bge_chip_enable_engine(bgep, BUFFER_MANAGER_MODE_REG, - STATE_MACHINE_ATTN_ENABLE_BIT)) + regval = STATE_MACHINE_ATTN_ENABLE_BIT; + if (bgep->chipid.device == DEVICE_ID_5719) + regval |= BUFFER_MANAGER_MODE_NO_TX_UNDERRUN; + if (DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) + regval |= BUFFER_MANAGER_MODE_MBLOW_ATTN_ENABLE; + if (!bge_chip_enable_engine(bgep, BUFFER_MANAGER_MODE_REG, regval)) retval = DDI_FAILURE; + if (!bge_chip_enable_engine(bgep, FTQ_RESET_REG, 0)) retval = DDI_FAILURE; /* * Steps 37-39: initialise Receive Buffer (Producer) RCBs */ - if (DEVICE_5717_SERIES_CHIPSETS(bgep)) { + if (DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) { buff_ring_t *brp = &bgep->buff[BGE_STD_BUFF_RING]; bge_reg_put64(bgep, STD_RCV_BD_RING_RCB_REG, brp->desc.cookie.dmac_laddress); @@ -3926,7 +4567,7 @@ bge_chip_start(bge_t *bgep, boolean_t reset_phys) */ bge_reg_put32(bgep, RCV_LP_CONFIG_REG, RCV_LP_CONFIG(bgep->chipid.rx_rings)); - switch (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev)) { + switch (MHCR_CHIP_ASIC_REV(bgep)) { case MHCR_CHIP_ASIC_REV_5700: case MHCR_CHIP_ASIC_REV_5701: case MHCR_CHIP_ASIC_REV_5703: @@ -4008,12 +4649,28 @@ bge_chip_start(bge_t *bgep, boolean_t reset_phys) * Receive List selector.Pay attention:0x3400 is not exist in BCM5714 * and BCM5715. */ + + if (bgep->chipid.device == DEVICE_ID_5719) { + for (i = 0; i < BGE_NUM_RDMA_CHANNELS; i++) { + if (bge_reg_get32(bgep, (BGE_RDMA_LENGTH + (i << 2))) > + bgep->chipid.default_mtu) + break; + } + if (i < BGE_NUM_RDMA_CHANNELS) { + regval = bge_reg_get32(bgep, RDMA_CORR_CTRL_REG); + regval |= RDMA_CORR_CTRL_TX_LENGTH_WA; + bge_reg_put32(bgep, RDMA_CORR_CTRL_REG, regval); + bgep->rdma_length_bug_on_5719 = B_TRUE; + } + } + if (bgep->chipid.tx_rings <= COALESCE_64_BYTE_RINGS && bgep->chipid.rx_rings <= COALESCE_64_BYTE_RINGS) coalmode = COALESCE_64_BYTE_STATUS; else coalmode = 0; - if (DEVICE_5717_SERIES_CHIPSETS(bgep)) + if (DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) coalmode = COALESCE_CLR_TICKS_RX; if (!bge_chip_enable_engine(bgep, HOST_COALESCE_MODE_REG, coalmode)) retval = DDI_FAILURE; @@ -4032,6 +4689,11 @@ bge_chip_start(bge_t *bgep, boolean_t reset_phys) * Step 72: Enable MAC DMA engines * Step 73: Clear & enable MAC statistics */ + if (bgep->ape_enabled) { + /* XXX put32 instead of set32 ? */ + bge_reg_put32(bgep, ETHERNET_MAC_MODE_REG, + ETHERNET_MODE_APE_TX_EN | ETHERNET_MODE_APE_RX_EN); + } bge_reg_set32(bgep, ETHERNET_MAC_MODE_REG, ETHERNET_MODE_ENABLE_FHDE | ETHERNET_MODE_ENABLE_RDE | @@ -4042,6 +4704,14 @@ bge_chip_start(bge_t *bgep, boolean_t reset_phys) ETHERNET_MODE_CLEAR_TX_STATS | ETHERNET_MODE_CLEAR_RX_STATS); + drv_usecwait(140); + + if (bgep->ape_enabled) { + /* Write our heartbeat update interval to APE. 
*/ + bge_ape_put32(bgep, BGE_APE_HOST_HEARTBEAT_INT_MS, + APE_HOST_HEARTBEAT_INT_DISABLE); + } + /* * Step 74: configure the MLCR (Miscellaneous Local Control * Register); not required, as we set up the MLCR in step 10 @@ -4068,23 +4738,28 @@ bge_chip_start(bge_t *bgep, boolean_t reset_phys) retval = DDI_FAILURE; dma_wrprio = (bge_dma_wrprio << DMA_PRIORITY_SHIFT) | ALL_DMA_ATTN_BITS; - if ((MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) == - MHCR_CHIP_ASIC_REV_5755) || - (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) == - MHCR_CHIP_ASIC_REV_5723) || - (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) == - MHCR_CHIP_ASIC_REV_5906)) { + /* the 5723 check here covers all newer chip families (OK) */ + if ((MHCR_CHIP_ASIC_REV(bgep) == MHCR_CHIP_ASIC_REV_5755) || + (MHCR_CHIP_ASIC_REV(bgep) == MHCR_CHIP_ASIC_REV_5723) || + (MHCR_CHIP_ASIC_REV(bgep) == MHCR_CHIP_ASIC_REV_5906)) { dma_wrprio |= DMA_STATUS_TAG_FIX_CQ12384; } if (!bge_chip_enable_engine(bgep, WRITE_DMA_MODE_REG, dma_wrprio)) retval = DDI_FAILURE; + + drv_usecwait(40); + if (DEVICE_5723_SERIES_CHIPSETS(bgep) || - DEVICE_5717_SERIES_CHIPSETS(bgep)) + DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) bge_dma_rdprio = 0; if (!bge_chip_enable_engine(bgep, READ_DMA_MODE_REG, (bge_dma_rdprio << DMA_PRIORITY_SHIFT) | ALL_DMA_ATTN_BITS)) retval = DDI_FAILURE; + + drv_usecwait(40); + if (!bge_chip_enable_engine(bgep, RCV_DATA_COMPLETION_MODE_REG, STATE_MACHINE_ATTN_ENABLE_BIT)) retval = DDI_FAILURE; @@ -4112,12 +4787,21 @@ bge_chip_start(bge_t *bgep, boolean_t reset_phys) STATE_MACHINE_ATTN_ENABLE_BIT)) retval = DDI_FAILURE; + drv_usecwait(40); + /* * Step 88: download firmware -- doesn't apply * Steps 89-90: enable Transmit & Receive MAC Engines */ - if (!bge_chip_enable_engine(bgep, TRANSMIT_MAC_MODE_REG, 0)) + regval = 0; + if (DEVICE_5717_SERIES_CHIPSETS(bgep)) { + regval |= TRANSMIT_MODE_MBUF_LOCKUP_FIX; + } + if (!bge_chip_enable_engine(bgep, TRANSMIT_MAC_MODE_REG, regval)) retval = DDI_FAILURE; + + drv_usecwait(100); + #ifdef BGE_IPMI_ASF if (!bgep->asf_enabled) { if (!bge_chip_enable_engine(bgep, RECEIVE_MAC_MODE_REG, @@ -4133,6 +4817,8 @@ bge_chip_start(bge_t *bgep, boolean_t reset_phys) retval = DDI_FAILURE; #endif + drv_usecwait(100); + /* * Step 91: disable auto-polling of PHY status */ @@ -4169,8 +4855,16 @@ bge_chip_start(bge_t *bgep, boolean_t reset_phys) * restart autoneg (if required) */ if (reset_phys) + { if (bge_phys_update(bgep) == DDI_FAILURE) retval = DDI_FAILURE; + /* forcing a mac link update here */ + bge_phys_check(bgep); + bgep->link_state = (bgep->param_link_up) ? LINK_STATE_UP : + LINK_STATE_DOWN; + bge_sync_mac_modes(bgep); + mac_link_update(bgep->mh, bgep->link_state); + } /* * Extra step (DSG): hand over all the Receive Buffers to the chip @@ -4213,6 +4907,17 @@ bge_chip_start(bge_t *bgep, boolean_t reset_phys) } #endif + if (DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) { + bge_cfg_clr16(bgep, PCI_CONF_DEV_CTRL_5717, + DEV_CTRL_NO_SNOOP | DEV_CTRL_RELAXED); +#if 0 + mhcr = pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_MHCR); + pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, + (mhcr | MHCR_TLP_MINOR_ERR_TOLERANCE)); +#endif + } + /* * Step 97: enable PCI interrupts!!! 
*/ @@ -4283,6 +4988,53 @@ bge_wake_factotum(bge_t *bgep) mutex_exit(bgep->softintrlock); } +static void +bge_intr_error_handler(bge_t *bgep) +{ + uint32_t flow; + uint32_t rdma; + uint32_t wdma; + uint32_t tmac; + uint32_t rmac; + uint32_t rxrs; + uint32_t emac; + uint32_t msis; + uint32_t txrs = 0; + + ASSERT(mutex_owned(bgep->genlock)); + + /* + * Read all the registers that show the possible + * reasons for the ERROR bit to be asserted + */ + flow = bge_reg_get32(bgep, FLOW_ATTN_REG); + rdma = bge_reg_get32(bgep, READ_DMA_STATUS_REG); + wdma = bge_reg_get32(bgep, WRITE_DMA_STATUS_REG); + tmac = bge_reg_get32(bgep, TRANSMIT_MAC_STATUS_REG); + rmac = bge_reg_get32(bgep, RECEIVE_MAC_STATUS_REG); + rxrs = bge_reg_get32(bgep, RX_RISC_STATE_REG); + emac = bge_reg_get32(bgep, ETHERNET_MAC_STATUS_REG); + msis = bge_reg_get32(bgep, MSI_STATUS_REG); + if (DEVICE_5704_SERIES_CHIPSETS(bgep)) + txrs = bge_reg_get32(bgep, TX_RISC_STATE_REG); + + BGE_DEBUG(("factotum($%p) flow 0x%x rdma 0x%x wdma 0x%x emac 0x%x msis 0x%x", + (void *)bgep, flow, rdma, wdma, emac, msis)); + BGE_DEBUG(("factotum($%p) tmac 0x%x rmac 0x%x rxrs 0x%08x txrs 0x%08x", + (void *)bgep, tmac, rmac, rxrs, txrs)); + + /* + * For now, just clear all the errors ... + */ + if (DEVICE_5704_SERIES_CHIPSETS(bgep)) + bge_reg_put32(bgep, TX_RISC_STATE_REG, ~0); + bge_reg_put32(bgep, RX_RISC_STATE_REG, ~0); + bge_reg_put32(bgep, RECEIVE_MAC_STATUS_REG, ~0); + bge_reg_put32(bgep, WRITE_DMA_STATUS_REG, ~0); + bge_reg_put32(bgep, READ_DMA_STATUS_REG, ~0); + bge_reg_put32(bgep, FLOW_ATTN_REG, ~0); +} + /* * bge_intr() -- handle chip interrupts */ @@ -4320,7 +5072,8 @@ bge_intr(caddr_t arg1, caddr_t arg2) * bit is *zero* when the interrupt is asserted. */ regval = bge_reg_get32(bgep, MISC_LOCAL_CONTROL_REG); - if (!(DEVICE_5717_SERIES_CHIPSETS(bgep)) && + if (!(DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) && (regval & MLCR_INTA_STATE)) { if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) @@ -4372,8 +5125,9 @@ bge_intr(caddr_t arg1, caddr_t arg2) mutex_exit(bgep->genlock); return (DDI_INTR_CLAIMED); } - retval = bge_status_sync(bgep, STATUS_FLAG_UPDATED, - &flags); + + retval = bge_status_sync(bgep, STATUS_FLAG_UPDATED | + STATUS_FLAG_LINK_CHANGED | STATUS_FLAG_ERROR, &flags); if (retval != DDI_FM_OK) { bgep->bge_dma_error = B_TRUE; goto chip_stop; @@ -4391,6 +5145,28 @@ bge_intr(caddr_t arg1, caddr_t arg2) DDI_FM_OK) goto chip_stop; + if (flags & STATUS_FLAG_LINK_CHANGED) { + BGE_DEBUG(("bge_intr($%p) ($%p) link event", arg1, arg2)); + if (bge_phys_check(bgep)) { + bgep->link_state = bgep->param_link_up ? 
+ LINK_STATE_UP : LINK_STATE_DOWN; + bge_sync_mac_modes(bgep); + mac_link_update(bgep->mh, bgep->link_state); + } + + if (bge_check_acc_handle(bgep, bgep->io_handle) != + DDI_FM_OK) + goto chip_stop; + } + + if (flags & STATUS_FLAG_ERROR) { + bge_intr_error_handler(bgep); + + if (bge_check_acc_handle(bgep, bgep->io_handle) != + DDI_FM_OK) + goto chip_stop; + } + /* * Drop the mutex while we: * Receive any newly-arrived packets @@ -4421,15 +5197,6 @@ bge_intr(caddr_t arg1, caddr_t arg2) bgep->missed_dmas = 0; } - /* - * Check for exceptional conditions that we need to handle - * - * Link status changed - * Status block not updated - */ - if (flags & STATUS_FLAG_LINK_CHANGED) - bge_wake_factotum(bgep); - if (bgep->missed_dmas) { /* * Probably due to the internal status tag not @@ -4481,6 +5248,7 @@ bge_intr(caddr_t arg1, caddr_t arg2) return (result); chip_stop: + #ifdef BGE_IPMI_ASF if (bgep->asf_enabled && bgep->asf_status == ASF_STAT_RUN) { /* @@ -4510,157 +5278,6 @@ chip_stop: #undef BGE_DBG #define BGE_DBG BGE_DBG_FACT /* debug flag for this code */ -static void bge_factotum_error_handler(bge_t *bgep); -#pragma no_inline(bge_factotum_error_handler) - -static void -bge_factotum_error_handler(bge_t *bgep) -{ - uint32_t flow; - uint32_t rdma; - uint32_t wdma; - uint32_t tmac; - uint32_t rmac; - uint32_t rxrs; - uint32_t txrs = 0; - - ASSERT(mutex_owned(bgep->genlock)); - - /* - * Read all the registers that show the possible - * reasons for the ERROR bit to be asserted - */ - flow = bge_reg_get32(bgep, FLOW_ATTN_REG); - rdma = bge_reg_get32(bgep, READ_DMA_STATUS_REG); - wdma = bge_reg_get32(bgep, WRITE_DMA_STATUS_REG); - tmac = bge_reg_get32(bgep, TRANSMIT_MAC_STATUS_REG); - rmac = bge_reg_get32(bgep, RECEIVE_MAC_STATUS_REG); - rxrs = bge_reg_get32(bgep, RX_RISC_STATE_REG); - if (DEVICE_5704_SERIES_CHIPSETS(bgep)) - txrs = bge_reg_get32(bgep, TX_RISC_STATE_REG); - - BGE_DEBUG(("factotum($%p) flow 0x%x rdma 0x%x wdma 0x%x", - (void *)bgep, flow, rdma, wdma)); - BGE_DEBUG(("factotum($%p) tmac 0x%x rmac 0x%x rxrs 0x%08x txrs 0x%08x", - (void *)bgep, tmac, rmac, rxrs, txrs)); - - /* - * For now, just clear all the errors ... - */ - if (DEVICE_5704_SERIES_CHIPSETS(bgep)) - bge_reg_put32(bgep, TX_RISC_STATE_REG, ~0); - bge_reg_put32(bgep, RX_RISC_STATE_REG, ~0); - bge_reg_put32(bgep, RECEIVE_MAC_STATUS_REG, ~0); - bge_reg_put32(bgep, WRITE_DMA_STATUS_REG, ~0); - bge_reg_put32(bgep, READ_DMA_STATUS_REG, ~0); - bge_reg_put32(bgep, FLOW_ATTN_REG, ~0); -} - -/* - * Handler for hardware link state change. - * - * When this routine is called, the hardware link state has changed - * and the new state is reflected in the param_* variables. Here - * we must update the softstate and reprogram the MAC to match. 
- */ -static void bge_factotum_link_handler(bge_t *bgep); -#pragma no_inline(bge_factotum_link_handler) - -static void -bge_factotum_link_handler(bge_t *bgep) -{ - ASSERT(mutex_owned(bgep->genlock)); - - /* - * Update the s/w link_state - */ - if (bgep->param_link_up) - bgep->link_state = LINK_STATE_UP; - else - bgep->link_state = LINK_STATE_DOWN; - - /* - * Reprogram the MAC modes to match - */ - bge_sync_mac_modes(bgep); -} - -static boolean_t bge_factotum_link_check(bge_t *bgep, int *dma_state); -#pragma no_inline(bge_factotum_link_check) - -static boolean_t -bge_factotum_link_check(bge_t *bgep, int *dma_state) -{ - boolean_t check; - uint64_t flags; - uint32_t tmac_status; - - ASSERT(mutex_owned(bgep->genlock)); - - /* - * Get & clear the writable status bits in the Tx status register - * (some bits are write-1-to-clear, others are just readonly). - */ - tmac_status = bge_reg_get32(bgep, TRANSMIT_MAC_STATUS_REG); - bge_reg_put32(bgep, TRANSMIT_MAC_STATUS_REG, tmac_status); - - /* - * Get & clear the ERROR and LINK_CHANGED bits from the status block - */ - *dma_state = bge_status_sync(bgep, STATUS_FLAG_ERROR | - STATUS_FLAG_LINK_CHANGED, &flags); - if (*dma_state != DDI_FM_OK) - return (B_FALSE); - - /* - * Clear any errors flagged in the status block ... - */ - if (flags & STATUS_FLAG_ERROR) - bge_factotum_error_handler(bgep); - - /* - * We need to check the link status if: - * the status block says there's been a link change - * or there's any discrepancy between the various - * flags indicating the link state (link_state, - * param_link_up, and the LINK STATE bit in the - * Transmit MAC status register). - */ - check = (flags & STATUS_FLAG_LINK_CHANGED) != 0; - switch (bgep->link_state) { - case LINK_STATE_UP: - check |= (bgep->param_link_up == B_FALSE); - check |= ((tmac_status & TRANSMIT_STATUS_LINK_UP) == 0); - break; - - case LINK_STATE_DOWN: - check |= (bgep->param_link_up != B_FALSE); - check |= ((tmac_status & TRANSMIT_STATUS_LINK_UP) != 0); - break; - - default: - check = B_TRUE; - break; - } - - /* - * If <check> is false, we're sure the link hasn't changed. - * If true, however, it's not yet definitive; we have to call - * bge_phys_check() to determine whether the link has settled - * into a new state yet ... and if it has, then call the link - * state change handler.But when the chip is 5700 in Dell 6650 - * ,even if check is false, the link may have changed.So we - * have to call bge_phys_check() to determine the link state. - */ - if (check || bgep->chipid.device == DEVICE_ID_5700) { - check = bge_phys_check(bgep); - if (check) - bge_factotum_link_handler(bgep); - } - - return (check); -} - /* * Factotum routine to check for Tx stall, using the 'watchdog' counter */ @@ -4710,9 +5327,7 @@ bge_factotum_stall_check(bge_t *bgep) /* * The factotum is woken up when there's something to do that we'd rather * not do from inside a hardware interrupt handler or high-level cyclic. - * Its two main tasks are: - * reset & restart the chip after an error - * check the link status whenever necessary + * Its main task is to reset & restart the chip after an error. 
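
With bge_factotum_error_handler(), bge_factotum_link_handler() and bge_factotum_link_check() removed in the hunks above, link changes are no longer deferred to the factotum; the interrupt handler, bge_chip_start(), bge_m_start() and the DEVICE_ID_5700 case in bge_chip_factotum() now each run the same check-and-report sequence directly. A rough stand-alone model of that repeated sequence; every function below is a stub standing in for the real bge_phys_check(), bge_sync_mac_modes() and mac_link_update() calls, so this only illustrates the ordering, not the driver internals:

#include <stdio.h>
#include <stdbool.h>

typedef enum { LINK_STATE_UNKNOWN, LINK_STATE_DOWN, LINK_STATE_UP } link_state_t;

/* stand-ins for bge_phys_check(), bge_sync_mac_modes(), mac_link_update() */
static bool
phys_check(bool *param_link_up)
{
	*param_link_up = true;		/* pretend the PHY settled link-up */
	return (true);
}

static void
sync_mac_modes(void)
{
	printf("MAC modes reprogrammed to match the PHY\n");
}

static void
mac_link_update(link_state_t st)
{
	printf("link reported %s to the MAC layer\n",
	    st == LINK_STATE_UP ? "up" : "down");
}

/* the check-and-report sequence this commit now runs at each detection point */
static void
report_link_change(link_state_t *link_state)
{
	bool up;

	if (phys_check(&up)) {
		*link_state = up ? LINK_STATE_UP : LINK_STATE_DOWN;
		sync_mac_modes();
		mac_link_update(*link_state);
	}
}

int
main(void)
{
	link_state_t st = LINK_STATE_UNKNOWN;

	report_link_change(&st);
	return (0);
}

The real call sites differ only in what triggers the check (a status-block flag, chip start, interface start, or the 5700 polling case).
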
*/ uint_t bge_chip_factotum(caddr_t arg); #pragma no_inline(bge_chip_factotum) @@ -4723,7 +5338,6 @@ bge_chip_factotum(caddr_t arg) bge_t *bgep; uint_t result; boolean_t error; - boolean_t linkchg; int dma_state; bgep = (void *)arg; @@ -4740,7 +5354,6 @@ bge_chip_factotum(caddr_t arg) result = DDI_INTR_CLAIMED; error = B_FALSE; - linkchg = B_FALSE; mutex_enter(bgep->genlock); switch (bgep->bge_chip_state) { @@ -4748,7 +5361,16 @@ bge_chip_factotum(caddr_t arg) break; case BGE_CHIP_RUNNING: - linkchg = bge_factotum_link_check(bgep, &dma_state); + + if (bgep->chipid.device == DEVICE_ID_5700) { + if (bge_phys_check(bgep)) { + bgep->link_state = (bgep->param_link_up) ? + LINK_STATE_UP : LINK_STATE_DOWN; + bge_sync_mac_modes(bgep); + mac_link_update(bgep->mh, bgep->link_state); + } + } + error = bge_factotum_stall_check(bgep); if (dma_state != DDI_FM_OK) { bgep->bge_dma_error = B_TRUE; @@ -4827,7 +5449,6 @@ bge_chip_factotum(caddr_t arg) break; } - /* * If an error is detected, stop the chip now, marking it as * faulty, so that it will be reset next time through ... @@ -4857,25 +5478,6 @@ bge_chip_factotum(caddr_t arg) } mutex_exit(bgep->genlock); - /* - * If the link state changed, tell the world about it. - * Note: can't do this while still holding the mutex. - */ - if (bgep->link_update_timer == BGE_LINK_UPDATE_TIMEOUT && - bgep->link_state != LINK_STATE_UNKNOWN) - linkchg = B_TRUE; - else if (bgep->link_update_timer < BGE_LINK_UPDATE_TIMEOUT && - bgep->link_state == LINK_STATE_DOWN) - linkchg = B_FALSE; - - if (linkchg) { - mac_link_update(bgep->mh, bgep->link_state); - bgep->link_update_timer = BGE_LINK_UPDATE_DONE; - } - if (bgep->manual_reset) { - bgep->manual_reset = B_FALSE; - } - return (result); } @@ -4893,6 +5495,7 @@ void bge_chip_cyclic(void *arg) { bge_t *bgep; + uint32_t regval; bgep = arg; @@ -4901,22 +5504,44 @@ bge_chip_cyclic(void *arg) return; case BGE_CHIP_RUNNING: + + /* XXX I really don't like this forced interrupt... 
*/ bge_reg_set32(bgep, HOST_COALESCE_MODE_REG, COALESCE_NOW); if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED); - if (bgep->link_update_timer < BGE_LINK_UPDATE_TIMEOUT) - bgep->link_update_timer++; - break; case BGE_CHIP_FAULT: case BGE_CHIP_ERROR: + break; } + mutex_enter(bgep->genlock); + + if (bgep->eee_lpi_wait && !--bgep->eee_lpi_wait) { + BGE_DEBUG(("eee cyclic, lpi enabled")); + bge_eee_enable(bgep); + } + + if (bgep->rdma_length_bug_on_5719) { + if ((bge_reg_get32(bgep, STAT_IFHCOUT_UPKGS_REG) + + bge_reg_get32(bgep, STAT_IFHCOUT_MPKGS_REG) + + bge_reg_get32(bgep, STAT_IFHCOUT_BPKGS_REG)) > + BGE_NUM_RDMA_CHANNELS) { + regval = bge_reg_get32(bgep, RDMA_CORR_CTRL_REG); + regval &= ~RDMA_CORR_CTRL_TX_LENGTH_WA; + bge_reg_put32(bgep, RDMA_CORR_CTRL_REG, regval); + bgep->rdma_length_bug_on_5719 = B_FALSE; + } + } + + mutex_exit(bgep->genlock); + bge_wake_factotum(bgep); + } @@ -5784,8 +6409,10 @@ bge_nic_read32(bge_t *bgep, bge_regno_t addr) addr = addr + 4; } #else - if (DEVICE_5717_SERIES_CHIPSETS(bgep)) + if (DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) { addr = LE_32(addr); + } #endif pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MWBAR, addr); @@ -5925,6 +6552,10 @@ bge_asf_pre_reset_operations(bge_t *bgep, uint32_t mode) break; } } + + if (mode == BGE_INIT_RESET || + mode == BGE_SUSPEND_RESET) + bge_ape_driver_state_change(bgep, mode); } @@ -5965,6 +6596,9 @@ bge_asf_post_reset_new_mode(bge_t *bgep, uint32_t mode) default: break; } + + if (mode == BGE_SHUTDOWN_RESET) + bge_ape_driver_state_change(bgep, mode); } #endif /* BGE_IPMI_ASF */ diff --git a/usr/src/uts/common/io/bge/bge_hw.h b/usr/src/uts/common/io/bge/bge_hw.h index f8e6c4d09a..506b97774e 100644 --- a/usr/src/uts/common/io/bge/bge_hw.h +++ b/usr/src/uts/common/io/bge/bge_hw.h @@ -20,7 +20,13 @@ */ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010-2013, by Broadcom, Inc. + * All Rights Reserved. + */ + +/* + * Copyright (c) 2002, 2010, Oracle and/or its affiliates. + * All rights reserved. 
*/ #ifndef _BGE_HW_H @@ -64,8 +70,13 @@ extern "C" { #define DEVICE_ID_5705C 0x1653 #define DEVICE_ID_5705_2 0x1654 #define DEVICE_ID_5717 0x1655 +#define DEVICE_ID_5717_C0 0x1665 #define DEVICE_ID_5718 0x1656 +#define DEVICE_ID_5719 0x1657 +#define DEVICE_ID_5720 0x165f #define DEVICE_ID_5724 0x165c +#define DEVICE_ID_5725 0x1643 +#define DEVICE_ID_5727 0x16f3 #define DEVICE_ID_5705M 0x165d #define DEVICE_ID_5705MA3 0x165e #define DEVICE_ID_5705F 0x166e @@ -187,9 +198,15 @@ extern "C" { (bgep->chipid.device == DEVICE_ID_5789)) #define DEVICE_5717_SERIES_CHIPSETS(bgep) \ - (bgep->chipid.device == DEVICE_ID_5717) ||\ + ((bgep->chipid.device == DEVICE_ID_5717) ||\ (bgep->chipid.device == DEVICE_ID_5718) ||\ - (bgep->chipid.device == DEVICE_ID_5724) + (bgep->chipid.device == DEVICE_ID_5719) ||\ + (bgep->chipid.device == DEVICE_ID_5720) ||\ + (bgep->chipid.device == DEVICE_ID_5724)) + +#define DEVICE_5725_SERIES_CHIPSETS(bgep) \ + ((bgep->chipid.device == DEVICE_ID_5725) ||\ + (bgep->chipid.device == DEVICE_ID_5727)) #define DEVICE_5723_SERIES_CHIPSETS(bgep) \ ((bgep->chipid.device == DEVICE_ID_5723) ||\ @@ -230,11 +247,13 @@ extern "C" { #define MHCR_ENABLE_INDIRECT_ACCESS 0x00000080 #define MHCR_ENABLE_REGISTER_WORD_SWAP 0x00000040 #define MHCR_ENABLE_CLOCK_CONTROL_WRITE 0x00000020 -#define MHCR_ENABLE_PCI_STATE_WRITE 0x00000010 +#define MHCR_ENABLE_PCI_STATE_RW 0x00000010 #define MHCR_ENABLE_ENDIAN_WORD_SWAP 0x00000008 #define MHCR_ENABLE_ENDIAN_BYTE_SWAP 0x00000004 #define MHCR_MASK_PCI_INT_OUTPUT 0x00000002 #define MHCR_CLEAR_INTERRUPT_INTA 0x00000001 +#define MHCR_BOUNDARY_CHECK 0x00002000 +#define MHCR_TLP_MINOR_ERR_TOLERANCE 0x00008000 #define MHCR_CHIP_REV_5700_B0 0x71000000 #define MHCR_CHIP_REV_5700_B2 0x71020000 @@ -304,12 +323,11 @@ extern "C" { #define MHCR_CHIP_REV_5906_A1 0xc0010000 #define MHCR_CHIP_REV_5906_A2 0xc0020000 -#define MHCR_CHIP_REV_5723_A0 0xf0000000 -#define MHCR_CHIP_REV_5723_A1 0xf0010000 -#define MHCR_CHIP_REV_5723_A2 0xf0020000 -#define MHCR_CHIP_REV_5723_B0 0xf1000000 +#define CHIP_ASIC_REV_USE_PROD_ID_REG 0xf0000000 +#define MHCR_CHIP_ASIC_REV(bgep) ((bgep)->chipid.asic_rev & 0xf0000000) +#define CHIP_ASIC_REV_PROD_ID(bgep) ((bgep)->chipid.asic_rev_prod_id) +#define CHIP_ASIC_REV(bgep) ((bgep)->chipid.asic_rev_prod_id >> 12) -#define MHCR_CHIP_ASIC_REV(ChipRevId) ((ChipRevId) & 0xf0000000) #define MHCR_CHIP_ASIC_REV_5700 (0x7 << 28) #define MHCR_CHIP_ASIC_REV_5701 (0x0 << 28) #define MHCR_CHIP_ASIC_REV_5703 (0x1 << 28) @@ -323,8 +341,30 @@ extern "C" { #define MHCR_CHIP_ASIC_REV_5755 ((uint32_t)0xa << 28) #define MHCR_CHIP_ASIC_REV_5715 ((uint32_t)0x9 << 28) #define MHCR_CHIP_ASIC_REV_5906 ((uint32_t)0xc << 28) +/* (0xf << 28) touches all 5717 and 5725 series as well (OK) */ #define MHCR_CHIP_ASIC_REV_5723 ((uint32_t)0xf << 28) +#define CHIP_ASIC_REV_5723 0x5784 +#define CHIP_ASIC_REV_5761 0x5761 +#define CHIP_ASIC_REV_5785 0x5785 +#define CHIP_ASIC_REV_57780 0x57780 + +#define CHIP_ASIC_REV_5717 0x5717 +#define CHIP_ASIC_REV_5719 0x5719 +#define CHIP_ASIC_REV_5720 0x5720 +#define CHIP_ASIC_REV_5762 0x5762 /* 5725/5727 */ + +#define CHIP_ASIC_REV_PROD_ID_REG 0x000000bc +#define CHIP_ASIC_REV_PROD_ID_GEN2_REG 0x000000f4 + +#define CHIP_ASIC_REV_5717_B0 0x05717100 +#define CHIP_ASIC_REV_5717_C0 0x05717200 +#define CHIP_ASIC_REV_5718_B0 0x05717100 +#define CHIP_ASIC_REV_5719_A0 0x05719000 +#define CHIP_ASIC_REV_5719_A1 0x05719001 +#define CHIP_ASIC_REV_5720_A0 0x05720000 +#define CHIP_ASIC_REV_5725_A0 0x05762000 +#define CHIP_ASIC_REV_5727_B0 0x05762100 /* * 
PCI DMA read/write Control Register, in PCI config space @@ -374,6 +414,9 @@ extern "C" { * is set in the MHCR, EXCEPT for the RETRY_SAME_DMA bit which is always RW */ #define PCI_CONF_BGE_PCISTATE 0x70 +#define PCISTATE_ALLOW_APE_CTLSPC_WR 0x00010000 +#define PCISTATE_ALLOW_APE_SHMEM_WR 0x00020000 +#define PCISTATE_ALLOW_APE_PSPACE_WR 0x00040000 #define PCISTATE_RETRY_SAME_DMA 0x00002000 #define PCISTATE_FLAT_VIEW 0x00000100 #define PCISTATE_EXT_ROM_RETRY 0x00000040 @@ -458,7 +501,10 @@ extern "C" { */ #define PCI_CONF_DEV_CTRL 0xd8 #define PCI_CONF_DEV_CTRL_5723 0xd4 +#define PCI_CONF_DEV_CTRL_5717 0xb4 +#define READ_REQ_SIZE_MASK 0x7000 #define READ_REQ_SIZE_MAX 0x5000 +#define READ_REQ_SIZE_2K 0x4000 #define DEV_CTRL_NO_SNOOP 0x0800 #define DEV_CTRL_RELAXED 0x0010 @@ -497,7 +543,7 @@ extern "C" { #define NIC_MEM_SHADOW_SEND_7_8 0x7000 /* bogus */ #define NIC_MEM_SHADOW_SEND_9_16 0x8000 /* bogus */ #define NIC_MEM_SHADOW_BUFF_STD 0x6000 -#define NIC_MEM_SHADOW_BUFF_STD_5717 0x40000 +#define NIC_MEM_SHADOW_BUFF_STD_5717 0x40000 #define NIC_MEM_SHADOW_BUFF_JUMBO 0x7000 #define NIC_MEM_SHADOW_BUFF_MINI 0x8000 /* bogus */ #define NIC_MEM_SHADOW_SEND_RING(ring, nslots) (0x4000 + 4*(ring)*(nslots)) @@ -540,9 +586,31 @@ extern "C" { #define HOST_COALESCE_MODE_REG 0x3c00 #define MEMORY_ARBITER_MODE_REG 0x4000 #define BUFFER_MANAGER_MODE_REG 0x4400 +#define BUFFER_MANAGER_MODE_NO_TX_UNDERRUN 0x80000000 +#define BUFFER_MANAGER_MODE_MBLOW_ATTN_ENABLE 0x00000010 #define READ_DMA_MODE_REG 0x4800 #define WRITE_DMA_MODE_REG 0x4c00 #define DMA_COMPLETION_MODE_REG 0x6400 +#define FAST_BOOT_PC 0x6894 + +#define RDMA_RSRV_CTRL_REG 0x4900 +#define RDMA_RSRV_CTRL_REG2 0x4890 +#define RDMA_RSRV_CTRL_FIFO_OFLW_FIX 0x00000004 +#define RDMA_RSRV_CTRL_FIFO_LWM_1_5K 0x00000c00 +#define RDMA_RSRV_CTRL_FIFO_LWM_MASK 0x00000ff0 +#define RDMA_RSRV_CTRL_FIFO_HWM_1_5K 0x000c0000 +#define RDMA_RSRV_CTRL_FIFO_HWM_MASK 0x000ff000 +#define RDMA_RSRV_CTRL_TXMRGN_320B 0x28000000 +#define RDMA_RSRV_CTRL_TXMRGN_MASK 0xffe00000 + +#define RDMA_CORR_CTRL_REG 0x4910 +#define RDMA_CORR_CTRL_REG2 0x48a0 +#define RDMA_CORR_CTRL_BLEN_BD_4K 0x00030000 +#define RDMA_CORR_CTRL_BLEN_LSO_4K 0x000c0000 +#define RDMA_CORR_CTRL_TX_LENGTH_WA 0x02000000 + +#define BGE_NUM_RDMA_CHANNELS 4 +#define BGE_RDMA_LENGTH 0x4be0 /* * Other bits in some of the above state machine control registers @@ -552,6 +620,7 @@ extern "C" { * Transmit MAC Mode Register * (TRANSMIT_MAC_MODE_REG, 0x045c) */ +#define TRANSMIT_MODE_MBUF_LOCKUP_FIX 0x00000100 #define TRANSMIT_MODE_LONG_PAUSE 0x00000040 #define TRANSMIT_MODE_BIG_BACKOFF 0x00000020 #define TRANSMIT_MODE_FLOW_CONTROL 0x00000010 @@ -682,6 +751,8 @@ extern "C" { * Ethernet MAC Mode Register */ #define ETHERNET_MAC_MODE_REG 0x0400 +#define ETHERNET_MODE_APE_TX_EN 0x10000000 +#define ETHERNET_MODE_APE_RX_EN 0x08000000 #define ETHERNET_MODE_ENABLE_FHDE 0x00800000 #define ETHERNET_MODE_ENABLE_RDE 0x00400000 #define ETHERNET_MODE_ENABLE_TDE 0x00200000 @@ -970,8 +1041,12 @@ extern "C" { #define SERDES_STATUS_COMMA_DETECTED 0x00000100 #define SERDES_STATUS_RXSTAT 0x000000ff +/* 5780/5714 only */ +#define SERDES_RX_CONTROL 0x000005b0 +#define SERDES_RX_CONTROL_SIG_DETECT 0x00000400 + /* - * SGMII Status Register (5717/5718 only) + * SGMII Status Register (5717/18/19/20 only) */ #define SGMII_STATUS_REG 0x5B4 #define MEDIA_SELECTION_MODE 0x00000100 @@ -1069,10 +1144,53 @@ extern "C" { #define JUMBO_RCV_BD_REPLENISH_DEFAULT 0x00000020 /* 32 */ /* - * CPMU registers (5717/5718 only) + * CPMU registers (5717/18/19/20 
only) + */ +#define CPMU_CLCK_ORIDE_REG 0x3624 +#define CPMU_CLCK_ORIDE_MAC_ORIDE_EN 0x80000000 +#define CPMU_STATUS_REG 0x362c +#define CPMU_STATUS_FUNC_NUM 0x20000000 +#define CPMU_STATUS_FUNC_NUM_SHIFT 29 +#define CPMU_STATUS_FUNC_NUM_5719 0xc0000000 +#define CPMU_STATUS_FUNC_NUM_5719_SHIFT 30 + +/* + * EEE registers (5718/19/20 only) */ -#define CPMU_STATUS_REG 0x362c -#define CPMU_STATUS_FUN_NUM 0x20000000 +#define EEE_MODE_REG 0x36b0 +#define EEE_MODE_APE_TX_DET_EN 0x00000004 +#define EEE_MODE_ERLY_L1_XIT_DET 0x00000008 +#define EEE_MODE_SND_IDX_DET_EN 0x00000040 +#define EEE_MODE_LPI_ENABLE 0x00000080 +#define EEE_MODE_LPI_IN_TX 0x00000100 +#define EEE_MODE_LPI_IN_RX 0x00000200 +#define EEE_MODE_EEE_ENABLE 0x00100000 + +#define EEE_DEBOUNCE_T1_CONTROL_REG 0x36b4 +#define EEE_DEBOUNCE_T1_PCIEXIT_2047US 0x07ff0000 +#define EEE_DEBOUNCE_T1_LNKIDLE_2047US 0x000007ff + +#define EEE_DEBOUNCE_T2_CONTROL_REG 0x36b8 +#define EEE_DEBOUNCE_T2_APE_TX_2047US 0x07ff0000 +#define EEE_DEBOUNCE_T2_TXIDXEQ_2047US 0x000007ff + +#define EEE_LINK_IDLE_CONTROL_REG 0x36bc +#define EEE_LINK_IDLE_PCIE_NL0 0x01000000 +#define EEE_LINK_IDLE_UART_IDL 0x00000004 +#define EEE_LINK_IDLE_APE_TX_MT 0x00000002 + +#define EEE_CONTROL_REG 0x36d0 +#define EEE_CONTROL_EXIT_16_5_US 0x0000019d +#define EEE_CONTROL_EXIT_36_US 0x00000384 +#define EEE_CONTROL_EXIT_20_1_US 0x000001f8 + +/* Clause 45 expansion registers */ +#define EEE_CL45_D7_RESULT_STAT 0x803e +#define EEE_CL45_D7_RESULT_STAT_LP_100TX 0x0002 +#define EEE_CL45_D7_RESULT_STAT_LP_1000T 0x0004 + +#define MDIO_MMD_AN 0x0007 +#define MDIO_AN_EEE_ADV 0x003c /* * Host Coalescing Engine Control Registers @@ -1259,25 +1377,43 @@ extern "C" { * Miscellaneous Local Control Register (MLCR) */ #define MISC_LOCAL_CONTROL_REG 0x6808 + #define MLCR_PCI_CTRL_SELECT 0x10000000 #define MLCR_LEGACY_PCI_MODE 0x08000000 #define MLCR_AUTO_SEEPROM_ACCESS 0x01000000 #define MLCR_SSRAM_CYCLE_DESELECT 0x00800000 #define MLCR_SSRAM_TYPE 0x00400000 #define MLCR_BANK_SELECT 0x00200000 + +#define MLCR_SRAM_SIZE_16M 0x00180000 +#define MLCR_SRAM_SIZE_8M 0x00140000 +#define MLCR_SRAM_SIZE_4M 0x00100000 +#define MLCR_SRAM_SIZE_2M 0x000c0000 +#define MLCR_SRAM_SIZE_1M 0x00080000 +#define MLCR_SRAM_SIZE_512K 0x00040000 +#define MLCR_SRAM_SIZE_256K 0x00000000 #define MLCR_SRAM_SIZE_MASK 0x001c0000 -#define MLCR_ENABLE_EXTERNAL_MEMORY 0x00020000 +#define MLCR_ENABLE_EXTERNAL_MEMORY 0x00020000 #define MLCR_MISC_PINS_OUTPUT_2 0x00010000 + #define MLCR_MISC_PINS_OUTPUT_1 0x00008000 #define MLCR_MISC_PINS_OUTPUT_0 0x00004000 #define MLCR_MISC_PINS_OUTPUT_ENABLE_2 0x00002000 #define MLCR_MISC_PINS_OUTPUT_ENABLE_1 0x00001000 + #define MLCR_MISC_PINS_OUTPUT_ENABLE_0 0x00000800 #define MLCR_MISC_PINS_INPUT_2 0x00000400 /* R/O */ #define MLCR_MISC_PINS_INPUT_1 0x00000200 /* R/O */ #define MLCR_MISC_PINS_INPUT_0 0x00000100 /* R/O */ +#define MLCR_GPIO_OUTPUT3 0x00000080 +#define MLCR_GPIO_OE3 0x00000040 +#define MLCR_USE_EXT_SIG_DETECT 0x00000020 /* 5714/5780 only */ +#define MLCR_GPIO_INPUT3 0x00000020 +#define MLCR_GPIO_UART_SEL 0x00000010 /* 5755 only */ +#define MLCR_USE_SIG_DETECT 0x00000010 /* 5714/5780 only */ + #define MLCR_INT_ON_ATTN 0x00000008 /* R/W */ #define MLCR_SET_INT 0x00000004 /* W/O */ #define MLCR_CLR_INT 0x00000002 /* W/O */ @@ -1292,9 +1428,20 @@ extern "C" { * just this fashion. It has to be set as an OUTPUT and driven LOW to * enable writing. Otherwise, the SEEPROM is protected. 
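
The device-identification macros earlier in this header change in two ways. DEVICE_5717_SERIES_CHIPSETS() gains outer parentheses (and now also covers the 5719 and 5720), so a caller can negate or AND the macro without wrapping it itself; and on newer chips the ASIC revision comes from a product-ID register (CHIP_ASIC_REV_PROD_ID_REG, or the GEN2 variant) whenever the MHCR-derived revision nibble reads CHIP_ASIC_REV_USE_PROD_ID_REG, with CHIP_ASIC_REV() taking the upper bits and the low 12 bits carrying the stepping (compare CHIP_ASIC_REV_5719_A0 and _A1). A small stand-alone check of both points; the series list is trimmed to two device IDs, CHIP_ASIC_STEPPING is a made-up name, and CHIP_ASIC_REV here takes the raw product-ID value rather than a bgep pointer; everything else is copied from the header:

#include <stdio.h>
#include <stdint.h>

#define DEVICE_ID_5717	0x1655
#define DEVICE_ID_5718	0x1656

/* old (unparenthesised) and new (parenthesised) shapes of the series test */
#define OLD_5717_SERIES(d)	(d) == DEVICE_ID_5717 || (d) == DEVICE_ID_5718
#define NEW_5717_SERIES(d)	((d) == DEVICE_ID_5717 || (d) == DEVICE_ID_5718)

/* decode of the product-ID revision format, cf. CHIP_ASIC_REV(bgep) */
#define CHIP_ASIC_REV(prod_id)		((prod_id) >> 12)
#define CHIP_ASIC_STEPPING(prod_id)	((prod_id) & 0xfff)	/* made-up name */

int
main(void)
{
	uint32_t dev = DEVICE_ID_5718;
	uint32_t prod_id = 0x05719001;	/* CHIP_ASIC_REV_5719_A1 */

	/* without outer parentheses the ! binds to the first comparison only */
	printf("old !series=%d  new !series=%d\n",
	    !OLD_5717_SERIES(dev), !NEW_5717_SERIES(dev));

	printf("asic rev 0x%x stepping 0x%03x\n",
	    CHIP_ASIC_REV(prod_id), CHIP_ASIC_STEPPING(prod_id));
	return (0);
}

For a 5718 this prints old !series=1 new !series=0 (the unparenthesised form claims the device is outside its own series as soon as a ! is applied), and asic rev 0x5719 stepping 0x001 for the 5719 A1 product ID.
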
*/ -#define MLCR_DEFAULT 0x0101c000 -#define MLCR_DEFAULT_5714 0x1901c000 -#define MLCR_DEFAULT_5717 0x01000000 +#define MLCR_DEFAULT (MLCR_AUTO_SEEPROM_ACCESS | \ + MLCR_MISC_PINS_OUTPUT_2 | \ + MLCR_MISC_PINS_OUTPUT_1 | \ + MLCR_MISC_PINS_OUTPUT_0) + +#define MLCR_DEFAULT_5714 (MLCR_PCI_CTRL_SELECT | \ + MLCR_LEGACY_PCI_MODE | \ + MLCR_AUTO_SEEPROM_ACCESS | \ + MLCR_MISC_PINS_OUTPUT_2 | \ + MLCR_MISC_PINS_OUTPUT_1 | \ + MLCR_MISC_PINS_OUTPUT_0 | \ + MLCR_USE_SIG_DETECT) + +#define MLCR_DEFAULT_5717 (MLCR_AUTO_SEEPROM_ACCESS) /* * Serial EEPROM Data/Address Registers (auto-access mode) @@ -1351,31 +1498,58 @@ extern "C" { #define NVM_CFG1_FLASH_MODE 0x00000001 #define NVM_SW_ARBITRATION_REG 0x7020 -#define NVM_READ_REQ3 0X00008000 -#define NVM_READ_REQ2 0X00004000 -#define NVM_READ_REQ1 0X00002000 -#define NVM_READ_REQ0 0X00001000 -#define NVM_WON_REQ3 0X00000800 -#define NVM_WON_REQ2 0X00000400 -#define NVM_WON_REQ1 0X00000200 -#define NVM_WON_REQ0 0X00000100 -#define NVM_RESET_REQ3 0X00000080 -#define NVM_RESET_REQ2 0X00000040 -#define NVM_RESET_REQ1 0X00000020 -#define NVM_RESET_REQ0 0X00000010 -#define NVM_SET_REQ3 0X00000008 -#define NVM_SET_REQ2 0X00000004 -#define NVM_SET_REQ1 0X00000002 -#define NVM_SET_REQ0 0X00000001 +#define NVM_READ_REQ3 0x00008000 +#define NVM_READ_REQ2 0x00004000 +#define NVM_READ_REQ1 0x00002000 +#define NVM_READ_REQ0 0x00001000 +#define NVM_WON_REQ3 0x00000800 +#define NVM_WON_REQ2 0x00000400 +#define NVM_WON_REQ1 0x00000200 +#define NVM_WON_REQ0 0x00000100 +#define NVM_RESET_REQ3 0x00000080 +#define NVM_RESET_REQ2 0x00000040 +#define NVM_RESET_REQ1 0x00000020 +#define NVM_RESET_REQ0 0x00000010 +#define NVM_SET_REQ3 0x00000008 +#define NVM_SET_REQ2 0x00000004 +#define NVM_SET_REQ1 0x00000002 +#define NVM_SET_REQ0 0x00000001 + +#define EEPROM_MAGIC 0x669955aa +#define EEPROM_MAGIC_FW 0xa5000000 +#define EEPROM_MAGIC_FW_MSK 0xff000000 +#define EEPROM_SB_FORMAT_MASK 0x00e00000 +#define EEPROM_SB_FORMAT_1 0x00200000 +#define EEPROM_SB_REVISION_MASK 0x001f0000 +#define EEPROM_SB_REVISION_0 0x00000000 +#define EEPROM_SB_REVISION_2 0x00020000 +#define EEPROM_SB_REVISION_3 0x00030000 +#define EEPROM_SB_REVISION_4 0x00040000 +#define EEPROM_SB_REVISION_5 0x00050000 +#define EEPROM_SB_REVISION_6 0x00060000 +#define EEPROM_MAGIC_HW 0xabcd +#define EEPROM_MAGIC_HW_MSK 0xffff + +#define NVM_DIR_START 0x18 +#define NVM_DIR_END 0x78 +#define NVM_DIRENT_SIZE 0xc +#define NVM_DIRTYPE_SHIFT 24 +#define NVM_DIRTYPE_LENMSK 0x003fffff +#define NVM_DIRTYPE_ASFINI 1 +#define NVM_DIRTYPE_EXTVPD 20 +#define NVM_PTREV_BCVER 0x94 +#define NVM_BCVER_MAJMSK 0x0000ff00 +#define NVM_BCVER_MAJSFT 8 +#define NVM_BCVER_MINMSK 0x000000ff /* * NVM access register * Applicable to BCM5721,BCM5751,BCM5752,BCM5714 * and BCM5715 only. 
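
The MLCR_DEFAULT values at the top of this hunk are now spelled as OR-combinations of the named MLCR bits instead of bare hex constants. Recomputing them from the bit definitions above shows that MLCR_DEFAULT and MLCR_DEFAULT_5717 reproduce the old 0x0101c000 and 0x01000000 exactly, while MLCR_DEFAULT_5714 additionally sets MLCR_USE_SIG_DETECT on top of the old 0x1901c000; a quick check:

#include <stdio.h>
#include <stdint.h>

#define MLCR_PCI_CTRL_SELECT		0x10000000
#define MLCR_LEGACY_PCI_MODE		0x08000000
#define MLCR_AUTO_SEEPROM_ACCESS	0x01000000
#define MLCR_MISC_PINS_OUTPUT_2		0x00010000
#define MLCR_MISC_PINS_OUTPUT_1		0x00008000
#define MLCR_MISC_PINS_OUTPUT_0		0x00004000
#define MLCR_USE_SIG_DETECT		0x00000010

int
main(void)
{
	uint32_t dflt = MLCR_AUTO_SEEPROM_ACCESS | MLCR_MISC_PINS_OUTPUT_2 |
	    MLCR_MISC_PINS_OUTPUT_1 | MLCR_MISC_PINS_OUTPUT_0;
	uint32_t dflt_5714 = MLCR_PCI_CTRL_SELECT | MLCR_LEGACY_PCI_MODE |
	    dflt | MLCR_USE_SIG_DETECT;
	uint32_t dflt_5717 = MLCR_AUTO_SEEPROM_ACCESS;

	printf("MLCR_DEFAULT      0x%08x (was 0x0101c000)\n", dflt);
	printf("MLCR_DEFAULT_5714 0x%08x (was 0x1901c000)\n", dflt_5714);
	printf("MLCR_DEFAULT_5717 0x%08x (was 0x01000000)\n", dflt_5717);
	return (0);
}

which prints 0x0101c000, 0x1901c010 and 0x01000000 respectively.
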
*/ -#define NVM_ACCESS_REG 0X7024 -#define NVM_WRITE_ENABLE 0X00000002 -#define NVM_ACCESS_ENABLE 0X00000001 +#define NVM_ACCESS_REG 0x7024 +#define NVM_WRITE_ENABLE 0x00000002 +#define NVM_ACCESS_ENABLE 0x00000001 /* * TLP Control Register @@ -1406,6 +1580,24 @@ extern "C" { /* * Vendor-specific MII registers */ + +#define MII_MMD_CTRL 0x0d /* MMD Access Control register */ +#define MII_MMD_CTRL_DATA_NOINC 0x4000 +#define MII_MMD_ADDRESS_DATA 0x0e /* MMD Address Data register */ + +#define MII_RXR_COUNTERS 0x14 /* Local/Remote Rx Counts */ +#define MII_DSP_RW_PORT 0x15 /* DSP read/write port */ +#define MII_DSP_CONTROL 0x16 /* DSP control register */ +#define MII_DSP_ADDRESS 0x17 /* DSP address register */ + +#define MII_DSP_TAP26 0x001a +#define MII_DSP_TAP26_ALNOKO 0x0001 +#define MII_DSP_TAP26_RMRXSTO 0x0002 +#define MII_DSP_TAP26_OPCSINPT 0x0004 + +#define MII_DSP_CH34TP2 0x4022 +#define MII_DSP_CH34TP2_HIBW01 0x017b + #define MII_EXT_CONTROL MII_VENDOR(0) #define MII_EXT_STATUS MII_VENDOR(1) #define MII_RCV_ERR_COUNT MII_VENDOR(2) @@ -1491,6 +1683,9 @@ extern "C" { #define MII_AUX_CTRL_MISC_WRITE_ENABLE 0x8000 #define MII_AUX_CTRL_MISC_WIRE_SPEED 0x0010 +#define MII_AUX_CTRL_TX_6DB 0x0400 +#define MII_AUX_CTRL_SMDSP_ENA 0x0800 + /* * Write this value to the AUX control register * to select which shadow register will be read @@ -1611,6 +1806,7 @@ typedef struct { #define SBD_FLAG_IP_CKSUM 0x0002 #define SBD_FLAG_PACKET_END 0x0004 #define SBD_FLAG_IP_FRAG 0x0008 +#define SBD_FLAG_JMB_PKT 0x0008 #define SBD_FLAG_IP_FRAG_END 0x0010 #define SBD_FLAG_VLAN_TAG 0x0040 @@ -2095,6 +2291,83 @@ typedef struct { #endif /* BGE_IPMI_ASF */ +/* APE registers. Accessible through BAR1 */ +#define BGE_APE_GPIO_MSG 0x0008 +#define BGE_APE_GPIO_MSG_SHIFT 4 +#define BGE_APE_EVENT 0x000c +#define APE_EVENT_1 0x00000001 +#define BGE_APE_LOCK_REQ 0x002c +#define APE_LOCK_REQ_DRIVER 0x00001000 +#define BGE_APE_LOCK_GRANT 0x004c +#define APE_LOCK_GRANT_DRIVER 0x00001000 +#define BGE_APE_STICKY_TMR 0x00b0 + +/* APE shared memory. 
Accessible through BAR1 */ +#define BGE_APE_SHMEM_BASE 0x4000 +#define BGE_APE_SEG_SIG 0x4000 +#define APE_SEG_SIG_MAGIC 0x41504521 +#define BGE_APE_FW_STATUS 0x400c +#define APE_FW_STATUS_READY 0x00000100 +#define BGE_APE_FW_FEATURES 0x4010 +#define BGE_APE_FW_FEATURE_NCSI 0x00000002 +#define BGE_APE_FW_VERSION 0x4018 +#define APE_FW_VERSION_MAJMSK 0xff000000 +#define APE_FW_VERSION_MAJSFT 24 +#define APE_FW_VERSION_MINMSK 0x00ff0000 +#define APE_FW_VERSION_MINSFT 16 +#define APE_FW_VERSION_REVMSK 0x0000ff00 +#define APE_FW_VERSION_REVSFT 8 +#define APE_FW_VERSION_BLDMSK 0x000000ff +#define BGE_APE_SEG_MSG_BUF_OFF 0x401c +#define BGE_APE_SEG_MSG_BUF_LEN 0x4020 +#define BGE_APE_HOST_SEG_SIG 0x4200 +#define APE_HOST_SEG_SIG_MAGIC 0x484f5354 +#define BGE_APE_HOST_SEG_LEN 0x4204 +#define APE_HOST_SEG_LEN_MAGIC 0x00000020 +#define BGE_APE_HOST_INIT_COUNT 0x4208 +#define BGE_APE_HOST_DRIVER_ID 0x420c +#define APE_HOST_DRIVER_ID_SOLARIS 0xf4000000 +#define APE_HOST_DRIVER_ID_MAGIC(maj, min) \ + (APE_HOST_DRIVER_ID_SOLARIS | (maj & 0xff) << 16 | (min & 0xff) << 8) +#define BGE_APE_HOST_BEHAVIOR 0x4210 +#define APE_HOST_BEHAV_NO_PHYLOCK 0x00000001 +#define BGE_APE_HOST_HEARTBEAT_INT_MS 0x4214 +#define APE_HOST_HEARTBEAT_INT_DISABLE 0 +#define APE_HOST_HEARTBEAT_INT_5SEC 5000 +#define BGE_APE_HOST_HEARTBEAT_COUNT 0x4218 +#define BGE_APE_HOST_DRVR_STATE 0x421c +#define BGE_APE_HOST_DRVR_STATE_START 0x00000001 +#define BGE_APE_HOST_DRVR_STATE_UNLOAD 0x00000002 +#define BGE_APE_HOST_DRVR_STATE_WOL 0x00000003 +#define BGE_APE_HOST_WOL_SPEED 0x4224 +#define BGE_APE_HOST_WOL_SPEED_AUTO 0x00008000 + +#define BGE_APE_EVENT_STATUS 0x4300 + +#define APE_EVENT_STATUS_DRIVER_EVNT 0x00000010 +#define APE_EVENT_STATUS_STATE_CHNGE 0x00000500 +#define APE_EVENT_STATUS_SCRTCHPD_READ 0x00001600 +#define APE_EVENT_STATUS_SCRTCHPD_WRITE 0x00001700 +#define APE_EVENT_STATUS_STATE_START 0x00010000 +#define APE_EVENT_STATUS_STATE_UNLOAD 0x00020000 +#define APE_EVENT_STATUS_STATE_WOL 0x00030000 +#define APE_EVENT_STATUS_STATE_SUSPEND 0x00040000 +#define APE_EVENT_STATUS_EVENT_PENDING 0x80000000 + +#define BGE_APE_PER_LOCK_REQ 0x8400 +#define APE_LOCK_PER_REQ_DRIVER 0x00001000 +#define BGE_APE_PER_LOCK_GRANT 0x8420 +#define APE_PER_LOCK_GRANT_DRIVER 0x00001000 + +/* APE convenience enumerations. */ +#define BGE_APE_LOCK_PHY0 0 +#define BGE_APE_LOCK_GRC 1 +#define BGE_APE_LOCK_PHY1 2 +#define BGE_APE_LOCK_PHY2 3 +#define BGE_APE_LOCK_MEM 4 +#define BGE_APE_LOCK_PHY3 5 +#define BGE_APE_LOCK_GPIO 7 + #ifdef __cplusplus } #endif diff --git a/usr/src/uts/common/io/bge/bge_impl.h b/usr/src/uts/common/io/bge/bge_impl.h index 772c989092..ccc57b94e7 100644 --- a/usr/src/uts/common/io/bge/bge_impl.h +++ b/usr/src/uts/common/io/bge/bge_impl.h @@ -20,7 +20,13 @@ */ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010-2013, by Broadcom, Inc. + * All Rights Reserved. + */ + +/* + * Copyright (c) 2002, 2010, Oracle and/or its affiliates. + * All rights reserved. 
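
Two details of the APE definitions above: the segment signatures are ASCII tags (0x41504521 spells "APE!", 0x484f5354 spells "HOST"), and APE_HOST_DRIVER_ID_MAGIC() packs the driver's major and minor version into the Solaris driver-ID word. A stand-alone check, using an arbitrary 1.0 version as the example; the macro body is copied verbatim from the header:

#include <stdio.h>
#include <stdint.h>

#define APE_SEG_SIG_MAGIC		0x41504521	/* "APE!" */
#define APE_HOST_SEG_SIG_MAGIC		0x484f5354	/* "HOST" */
#define APE_HOST_DRIVER_ID_SOLARIS	0xf4000000
#define APE_HOST_DRIVER_ID_MAGIC(maj, min) \
	(APE_HOST_DRIVER_ID_SOLARIS | (maj & 0xff) << 16 | (min & 0xff) << 8)

static void
print_sig(uint32_t sig)
{
	printf("%c%c%c%c\n", (int)((sig >> 24) & 0xff), (int)((sig >> 16) & 0xff),
	    (int)((sig >> 8) & 0xff), (int)(sig & 0xff));
}

int
main(void)
{
	print_sig(APE_SEG_SIG_MAGIC);		/* APE! */
	print_sig(APE_HOST_SEG_SIG_MAGIC);	/* HOST */
	printf("driver id 0x%08x\n", APE_HOST_DRIVER_ID_MAGIC(1, 0));
	return (0);
}

prints APE!, HOST and driver id 0xf4010000.
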
*/ #ifndef _BGE_IMPL_H @@ -45,7 +51,6 @@ extern "C" { #endif /* __sparcv9 */ #include <sys/kstat.h> #include <sys/ethernet.h> -#include <sys/vlan.h> #include <sys/errno.h> #include <sys/dlpi.h> #include <sys/devops.h> @@ -77,6 +82,18 @@ extern "C" { #include <sys/x86_archext.h> #endif +#ifndef VLAN_TAGSZ +#define VLAN_TAGSZ 4 +#endif + +#define BGE_STR_SIZE 32 + +#ifndef OFFSETOF +#define OFFSETOF(_s, _f) \ + ((uint32_t)((uint8_t *)(&((_s *)0)->_f) - \ + (uint8_t *)((uint8_t *) 0))) +#endif + /* * <sys/ethernet.h> *may* already have provided the typedef ether_addr_t; * but of course C doesn't provide a way to check this directly. So here @@ -94,7 +111,6 @@ typedef uchar_t ether_addr_t[ETHERADDRL]; */ extern int secpolicy_net_config(const cred_t *, boolean_t); -#include <sys/netlb.h> /* originally from cassini */ #include <sys/miiregs.h> /* by fjlite out of intel */ #include "bge.h" @@ -133,6 +149,7 @@ extern int secpolicy_net_config(const cred_t *, boolean_t); #define BGE_PCI_CONFIG_RNUMBER 0 #define BGE_PCI_OPREGS_RNUMBER 1 +#define BGE_PCI_APEREGS_RNUMBER 2 #define BGE_DMA_MODE DDI_DMA_STREAMING #define BGE_HEADROOM 34 @@ -168,6 +185,7 @@ extern int secpolicy_net_config(const cred_t *, boolean_t); #define BGE_HALFTICK 268435456LL /* 2**28 ns! */ #define BGE_CYCLIC_PERIOD (4*BGE_HALFTICK) /* ~1.0s */ +#define BGE_CYCLIC_TIMEOUT (drv_usectohz(1000000)) /* ~1.0s */ #define BGE_SERDES_STABLE_TIME (3*BGE_HALFTICK) /* ~0.8s */ #define BGE_PHY_STABLE_TIME (11*BGE_HALFTICK) /* ~3.0s */ #define BGE_LINK_SETTLE_TIME (111*BGE_HALFTICK) /* ~30.0s */ @@ -301,7 +319,6 @@ extern int secpolicy_net_config(const cred_t *, boolean_t); #define BGE_LOWAT (256) #define BGE_HIWAT (256*1024) - /* * Basic data types, for clarity in distinguishing 'numbers' * used for different purposes ... @@ -593,6 +610,7 @@ enum bge_nvmem_type { */ typedef struct { uint32_t asic_rev; /* masked from MHCR */ + uint32_t asic_rev_prod_id; /* new revision ID format */ uint32_t businfo; /* from private reg */ uint16_t command; /* saved during attach */ @@ -627,6 +645,7 @@ typedef struct { uint32_t rx_rings; /* from bge.conf */ uint32_t tx_rings; /* from bge.conf */ + uint32_t eee; /* from bge.conf */ uint32_t default_mtu; /* from bge.conf */ uint64_t hw_mac_addr; /* from chip register */ @@ -718,11 +737,22 @@ typedef struct bge { /* * These fields are set by attach() and unchanged thereafter ... 
*/ + char version[BGE_STR_SIZE]; +#define BGE_FW_VER_SIZE 32 + char fw_version[BGE_FW_VER_SIZE]; dev_info_t *devinfo; /* device instance */ + uint32_t pci_bus; /* from "regs" prop */ + uint32_t pci_dev; /* from "regs" prop */ + uint32_t pci_func; /* from "regs" prop */ mac_handle_t mh; /* mac module handle */ ddi_acc_handle_t cfg_handle; /* DDI I/O handle */ ddi_acc_handle_t io_handle; /* DDI I/O handle */ void *io_regs; /* mapped registers */ + ddi_acc_handle_t ape_handle; /* DDI I/O handle */ + void *ape_regs; /* mapped registers */ + boolean_t ape_enabled; + boolean_t ape_has_ncsi; + ddi_periodic_t periodic_id; /* periodical callback */ ddi_softintr_t factotum_id; /* factotum callback */ ddi_softintr_t drain_id; /* reschedule callback */ @@ -784,6 +814,8 @@ typedef struct bge { recv_ring_t recv[BGE_RECV_RINGS_MAX]; /* 16*0x0090 */ send_ring_t send[BGE_SEND_RINGS_MAX]; /* 16*0x0100 */ + mac_resource_handle_t macRxResourceHandles[BGE_RECV_RINGS_MAX]; + /* * Locks: * @@ -884,6 +916,7 @@ typedef struct bge { uint64_t tx_resched; uint32_t factotum_flag; /* softint pending */ uintptr_t pagemask; + boolean_t rdma_length_bug_on_5719; /* * NDD parameters (protected by genlock) @@ -953,12 +986,13 @@ typedef struct bge { uint32_t param_drain_max; uint64_t param_link_speed; link_duplex_t param_link_duplex; + uint32_t eee_lpi_wait; - - uint32_t link_update_timer; uint64_t timestamp; } bge_t; +#define CATC_TRIGGER(bgep, data) bge_reg_put32(bgep, 0x0a00, (data)) + /* * 'Progress' bit flags ... */ @@ -1031,35 +1065,31 @@ typedef struct bge { */ #define BGE_DBG_STOP 0x00000001 /* early debug_enter() */ #define BGE_DBG_TRACE 0x00000002 /* general flow tracing */ - +#define BGE_DBG_APE 0x00000004 /* low-level APE access */ +#define BGE_DBG_HPSD 0x00000008 /* low-level HPSD access*/ #define BGE_DBG_REGS 0x00000010 /* low-level accesses */ #define BGE_DBG_MII 0x00000020 /* low-level MII access */ #define BGE_DBG_SEEPROM 0x00000040 /* low-level SEEPROM IO */ #define BGE_DBG_CHIP 0x00000080 /* low(ish)-level code */ - #define BGE_DBG_RECV 0x00000100 /* receive-side code */ #define BGE_DBG_SEND 0x00000200 /* packet-send code */ - #define BGE_DBG_INT 0x00001000 /* interrupt handler */ #define BGE_DBG_FACT 0x00002000 /* factotum (softint) */ - #define BGE_DBG_PHY 0x00010000 /* Copper PHY code */ #define BGE_DBG_SERDES 0x00020000 /* SerDes code */ #define BGE_DBG_PHYS 0x00040000 /* Physical layer code */ #define BGE_DBG_LINK 0x00080000 /* Link status check */ - #define BGE_DBG_INIT 0x00100000 /* initialisation */ #define BGE_DBG_NEMO 0x00200000 /* nemo interaction */ #define BGE_DBG_ADDR 0x00400000 /* address-setting code */ #define BGE_DBG_STATS 0x00800000 /* statistics */ - #define BGE_DBG_IOCTL 0x01000000 /* ioctl handling */ #define BGE_DBG_LOOP 0x02000000 /* loopback ioctl code */ #define BGE_DBG_PPIO 0x04000000 /* Peek/poke ioctls */ #define BGE_DBG_BADIOC 0x08000000 /* unknown ioctls */ - #define BGE_DBG_MCTL 0x10000000 /* mctl (csum) code */ #define BGE_DBG_NDD 0x20000000 /* NDD operations */ +#define BGE_DBG_MEM 0x40000000 /* memory allocations and chunking */ /* * Debugging ... @@ -1067,7 +1097,7 @@ typedef struct bge { #ifdef DEBUG #define BGE_DEBUGGING 1 #else -#define BGE_DEBUGGING 0 +#define BGE_DEBUGGING 1 #endif /* DEBUG */ @@ -1107,6 +1137,9 @@ typedef struct bge { #define BGE_LDB(b, args) BGE_XDB(b, bgep->debug, (*bge_db(bgep)), args) #define BGE_CDB(f, args) BGE_XDB(BGE_DBG, bgep->debug, f, args) +#define DEVNAME(_sc) ((_sc)->ifname) +#define DPRINTF(f, ...) 
do { cmn_err(CE_NOTE, (f), __VA_ARGS__); } while (0) + /* * Conditional-print macros. * @@ -1147,11 +1180,20 @@ typedef struct bge { /* bge_chip.c */ uint16_t bge_mii_get16(bge_t *bgep, bge_regno_t regno); void bge_mii_put16(bge_t *bgep, bge_regno_t regno, uint16_t value); +uint16_t bge_phydsp_read(bge_t *bgep, bge_regno_t regno); +void bge_phydsp_write(bge_t *bgep, bge_regno_t regno, uint16_t value); uint32_t bge_reg_get32(bge_t *bgep, bge_regno_t regno); void bge_reg_put32(bge_t *bgep, bge_regno_t regno, uint32_t value); void bge_reg_set32(bge_t *bgep, bge_regno_t regno, uint32_t bits); void bge_reg_clr32(bge_t *bgep, bge_regno_t regno, uint32_t bits); +uint32_t bge_ape_get32(bge_t *bgep, bge_regno_t regno); +void bge_ape_put32(bge_t *bgep, bge_regno_t regno, uint32_t value); void bge_mbx_put(bge_t *bgep, bge_regno_t regno, uint64_t value); +void bge_ape_lock_init(bge_t *bgep); +int bge_ape_scratchpad_read(bge_t *bgep, uint32_t *data, uint32_t base_off, uint32_t lenToRead); +int bge_ape_scratchpad_write(bge_t *bgep, uint32_t dstoff, uint32_t *data, uint32_t lenToWrite); +int bge_nvmem_read32(bge_t *bgep, bge_regno_t addr, uint32_t *dp); +int bge_nvmem_write32(bge_t *bgep, bge_regno_t addr, uint32_t *dp); void bge_chip_cfg_init(bge_t *bgep, chip_id_t *cidp, boolean_t enable_dma); int bge_chip_id_init(bge_t *bgep); void bge_chip_coalesce_update(bge_t *bgep); @@ -1231,7 +1273,9 @@ void bge_intr_enable(bge_t *bgep); void bge_intr_disable(bge_t *bgep); int bge_reprogram(bge_t *); -/* bge_phys.c */ +/* bge_mii.c */ +void bge_eee_init(bge_t *bgep); +void bge_eee_enable(bge_t * bgep); int bge_phys_init(bge_t *bgep); void bge_phys_reset(bge_t *bgep); int bge_phys_idle(bge_t *bgep); @@ -1284,9 +1328,6 @@ void bge_adj_volt_5906(bge_t *bgep); #define BGE_ASF_HEARTBEAT_INTERVAL 1500000 -#define BGE_LINK_UPDATE_TIMEOUT 10 /* ~ 5 sec */ -#define BGE_LINK_UPDATE_DONE (BGE_LINK_UPDATE_TIMEOUT+1) - #ifdef __cplusplus } #endif diff --git a/usr/src/uts/common/io/bge/bge_kstats.c b/usr/src/uts/common/io/bge/bge_kstats.c index c10a1b4601..9df359507e 100644 --- a/usr/src/uts/common/io/bge/bge_kstats.c +++ b/usr/src/uts/common/io/bge/bge_kstats.c @@ -20,6 +20,11 @@ */ /* + * Copyright (c) 2010-2013, by Broadcom, Inc. + * All Rights Reserved. + */ + +/* * Copyright 2010 Sun Microsystems, Inc. All rights reserved. * Use is subject to license terms. */ @@ -286,6 +291,10 @@ static const bge_ksindex_t bge_chipid[] = { { 18, "&supported" }, { 19, "&interface" }, + { 20, "nvtype" }, + + { 21, "asic_rev_prod_id" }, + { -1, NULL } }; @@ -343,6 +352,13 @@ bge_chipid_update(kstat_t *ksp, int flag) bge_set_char_kstat(knp++, tmp & CHIP_FLAG_SERDES ? "serdes" : "copper"); + (knp++)->value.ui64 = + ((bgep->chipid.nvtype == BGE_NVTYPE_NONE) || + (bgep->chipid.nvtype == BGE_NVTYPE_UNKNOWN)) ? + 0 : bgep->chipid.nvtype; + + (knp++)->value.ui64 = bgep->chipid.asic_rev_prod_id; + return (0); } @@ -499,6 +515,7 @@ static const bge_ksindex_t bge_phydata[] = { { MII_INTR_STATUS, "intr_status" }, { MII_INTR_MASK, "intr_mask" }, { MII_HCD_STATUS, "hcd_status" }, + { EEE_MODE_REG, "eee" }, { -1, NULL } }; @@ -540,6 +557,16 @@ bge_phydata_update(kstat_t *ksp, int flag) knp->value.ui64 |= bge_mii_get16(bgep, MII_PHYIDL); break; + case EEE_MODE_REG: + knp->value.ui64 = 0; + if (bgep->link_state == LINK_STATE_UP) + { + knp->value.ui64 = + (bge_reg_get32(bgep, EEE_MODE_REG) & 0x80) ? 
+ 1 : 0; + } + break; + default: knp->value.ui64 = bge_mii_get16(bgep, ksip->index); break; @@ -567,7 +594,7 @@ bge_setup_named_kstat(bge_t *bgep, int instance, char *name, size /= sizeof (bge_ksindex_t); ksp = kstat_create(BGE_DRIVER_NAME, instance, name, "net", - KSTAT_TYPE_NAMED, size-1, KSTAT_FLAG_PERSISTENT); + KSTAT_TYPE_NAMED, size-1, 0); if (ksp == NULL) return (NULL); @@ -663,7 +690,7 @@ bge_m_stat(void *arg, uint_t stat, uint64_t *val) bge_statistics_t *bstp; bge_statistics_reg_t *pstats; - if (bgep->bge_chip_state == BGE_CHIP_FAULT) { + if (bgep->bge_chip_state != BGE_CHIP_RUNNING) { return (EINVAL); } @@ -735,7 +762,8 @@ bge_m_stat(void *arg, uint_t stat, uint64_t *val) switch (stat) { case MAC_STAT_IFSPEED: - *val = bgep->param_link_speed * 1000000ull; + *val = (bgep->link_state != LINK_STATE_UNKNOWN) ? + (bgep->param_link_speed * 1000000ull) : 0; break; case MAC_STAT_MULTIRCV: @@ -916,12 +944,14 @@ bge_m_stat(void *arg, uint_t stat, uint64_t *val) *val = pstats->dot3StatsFrameTooLongs; break; +#if (MAC_VERSION > 1) case ETHER_STAT_TOOSHORT_ERRORS: if (bgep->chipid.statistic_type == BGE_STAT_BLK) *val = bstp->s.etherStatsUndersizePkts; else *val = pstats->etherStatsUndersizePkts; break; +#endif case ETHER_STAT_XCVR_ADDR: *val = bgep->phy_mii_addr; @@ -994,9 +1024,11 @@ bge_m_stat(void *arg, uint_t stat, uint64_t *val) *val = 1; break; +#if (MAC_VERSION > 1) case ETHER_STAT_CAP_REMFAULT: *val = 1; break; +#endif case ETHER_STAT_ADV_CAP_1000FDX: *val = bgep->param_adv_1000fdx; @@ -1034,6 +1066,7 @@ bge_m_stat(void *arg, uint_t stat, uint64_t *val) *val = bgep->param_adv_autoneg; break; +#if (MAC_VERSION > 1) case ETHER_STAT_ADV_REMFAULT: if (bgep->chipid.flags & CHIP_FLAG_SERDES) *val = 0; @@ -1049,6 +1082,7 @@ bge_m_stat(void *arg, uint_t stat, uint64_t *val) mutex_exit(bgep->genlock); } break; +#endif case ETHER_STAT_LP_CAP_1000FDX: *val = bgep->param_lp_1000fdx; @@ -1086,6 +1120,7 @@ bge_m_stat(void *arg, uint_t stat, uint64_t *val) *val = bgep->param_lp_autoneg; break; +#if (MAC_VERSION > 1) case ETHER_STAT_LP_REMFAULT: if (bgep->chipid.flags & CHIP_FLAG_SERDES) *val = 0; @@ -1101,6 +1136,7 @@ bge_m_stat(void *arg, uint_t stat, uint64_t *val) mutex_exit(bgep->genlock); } break; +#endif case ETHER_STAT_LINK_ASMPAUSE: *val = bgep->param_adv_asym_pause && @@ -1117,7 +1153,8 @@ bge_m_stat(void *arg, uint_t stat, uint64_t *val) break; case ETHER_STAT_LINK_DUPLEX: - *val = bgep->param_link_duplex; + *val = (bgep->link_state != LINK_STATE_UNKNOWN) ? + bgep->param_link_duplex : LINK_DUPLEX_UNKNOWN; break; default: diff --git a/usr/src/uts/common/io/bge/bge_lint.c b/usr/src/uts/common/io/bge/bge_lint.c new file mode 100644 index 0000000000..2f51987068 --- /dev/null +++ b/usr/src/uts/common/io/bge/bge_lint.c @@ -0,0 +1,18 @@ +/* + * This file and its contents are supplied under the terms of the + * Common Development and Distribution License ("CDDL"), version 1.0. + * You may only use this file in accordance with the terms of version + * 1.0 of the CDDL. + * + * A full copy of the text of the CDDL should have accompanied this + * source. A copy of the CDDL is also available via the Internet at + * http://www.illumos.org/license/CDDL. + */ + +/* + * This is a dummy lint file to pacify lint for bge, which due to its upstream, + * makes it, unfortunately, not realistic to lint. We have a dummy definition to + * ensure that we don't trigger lint's empty translation unit. 
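
In the PHY kstat hunk above, the new "eee" statistic reports bit 0x80 of EEE_MODE_REG while the link is up; that literal matches EEE_MODE_LPI_ENABLE as defined in bge_hw.h earlier in this commit, so the kstat effectively says whether low-power idle has been enabled on the link. A trivial rendering of that mapping (the register readback value is made up):

#include <stdio.h>
#include <stdint.h>

#define EEE_MODE_LPI_ENABLE	0x00000080	/* the 0x80 used by the kstat */
#define EEE_MODE_LPI_IN_TX	0x00000100
#define EEE_MODE_LPI_IN_RX	0x00000200

int
main(void)
{
	uint32_t eee_mode = 0x00000180;		/* made-up register readback */

	printf("eee kstat value  %d\n",
	    (eee_mode & EEE_MODE_LPI_ENABLE) ? 1 : 0);
	printf("currently in LPI tx=%d rx=%d\n",
	    (eee_mode & EEE_MODE_LPI_IN_TX) ? 1 : 0,
	    (eee_mode & EEE_MODE_LPI_IN_RX) ? 1 : 0);
	return (0);
}

The neighbouring EEE_MODE_LPI_IN_TX/RX bits appear to indicate whether the link is actually sitting in LPI at the moment of the read.
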
+ */ + +extern int bge_lint; diff --git a/usr/src/uts/common/io/bge/bge_main2.c b/usr/src/uts/common/io/bge/bge_main2.c index f191f313c0..bd7e883ca1 100644 --- a/usr/src/uts/common/io/bge/bge_main2.c +++ b/usr/src/uts/common/io/bge/bge_main2.c @@ -20,7 +20,13 @@ */ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010-2013, by Broadcom, Inc. + * All Rights Reserved. + */ + +/* + * Copyright (c) 2002, 2010, Oracle and/or its affiliates. + * All rights reserved. */ #include "bge_impl.h" @@ -29,6 +35,12 @@ #include <sys/mac.h> #include <sys/mac_flow.h> + +#ifndef STRINGIFY +#define XSTRINGIFY(x) #x +#define STRINGIFY(x) XSTRINGIFY(x) +#endif + /* * This is the string displayed by modinfo, etc. */ @@ -47,18 +59,21 @@ static char subdev_propname[] = "subsystem-id"; static char subven_propname[] = "subsystem-vendor-id"; static char rxrings_propname[] = "bge-rx-rings"; static char txrings_propname[] = "bge-tx-rings"; +static char eee_propname[] = "bge-eee"; static char fm_cap[] = "fm-capable"; static char default_mtu[] = "default_mtu"; static int bge_add_intrs(bge_t *, int); static void bge_rem_intrs(bge_t *); static int bge_unicst_set(void *, const uint8_t *, int); +static int bge_addmac(void *, const uint8_t *); +static int bge_remmac(void *, const uint8_t *); /* * Describes the chip's DMA engine */ static ddi_dma_attr_t dma_attr = { - DMA_ATTR_V0, /* dma_attr version */ + DMA_ATTR_V0, /* dma_attr_version */ 0x0000000000000000ull, /* dma_attr_addr_lo */ 0xFFFFFFFFFFFFFFFFull, /* dma_attr_addr_hi */ 0x00000000FFFFFFFFull, /* dma_attr_count_max */ @@ -66,7 +81,7 @@ static ddi_dma_attr_t dma_attr = { 0x00000FFF, /* dma_attr_burstsizes */ 0x00000001, /* dma_attr_minxfer */ 0x000000000000FFFFull, /* dma_attr_maxxfer */ - 0xFFFFFFFFFFFFFFFFull, /* dma_attr_seg */ + 0x00000000FFFFFFFFull, /* dma_attr_seg */ 1, /* dma_attr_sgllen */ 0x00000001, /* dma_attr_granular */ DDI_DMA_FLAGERR /* dma_attr_flags */ @@ -103,7 +118,9 @@ static ddi_device_acc_attr_t bge_data_accattr = { static int bge_m_start(void *); static void bge_m_stop(void *); static int bge_m_promisc(void *, boolean_t); +static int bge_m_unicst(void * pArg, const uint8_t *); static int bge_m_multicst(void *, boolean_t, const uint8_t *); +static void bge_m_resources(void * arg); static void bge_m_ioctl(void *, queue_t *, mblk_t *); static boolean_t bge_m_getcapab(void *, mac_capab_t, void *); static int bge_unicst_set(void *, const uint8_t *, @@ -121,26 +138,48 @@ static int bge_get_priv_prop(bge_t *, const char *, uint_t, static void bge_priv_propinfo(const char *, mac_prop_info_handle_t); -#define BGE_M_CALLBACK_FLAGS (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | \ - MC_GETPROP | MC_PROPINFO) - static mac_callbacks_t bge_m_callbacks = { - BGE_M_CALLBACK_FLAGS, + MC_IOCTL +#ifdef MC_RESOURCES + | MC_RESOURCES +#endif +#ifdef MC_SETPROP + | MC_SETPROP +#endif +#ifdef MC_GETPROP + | MC_GETPROP +#endif +#ifdef MC_PROPINFO + | MC_PROPINFO +#endif + | MC_GETCAPAB, bge_m_stat, bge_m_start, bge_m_stop, bge_m_promisc, bge_m_multicst, - NULL, + bge_m_unicst, bge_m_tx, +#ifdef MC_RESOURCES + bge_m_resources, +#else NULL, +#endif bge_m_ioctl, bge_m_getcapab, +#ifdef MC_OPEN NULL, NULL, +#endif +#ifdef MC_SETPROP bge_m_setprop, +#endif +#ifdef MC_GETPROP bge_m_getprop, +#endif +#ifdef MC_PROPINFO bge_m_propinfo +#endif }; char *bge_priv_prop[] = { @@ -489,7 +528,6 @@ bge_m_stop(void *arg) } else bge_stop(bgep); - bgep->link_update_timer = 0; bgep->link_state = LINK_STATE_UNKNOWN; mac_link_update(bgep->mh, 
bgep->link_state); @@ -542,6 +580,11 @@ bge_m_start(void *arg) if ((bgep->asf_status == ASF_STAT_RUN) && (bgep->asf_pseudostop)) { bgep->bge_mac_state = BGE_MAC_STARTED; + /* forcing a mac link update here */ + bge_phys_check(bgep); + bgep->link_state = (bgep->param_link_up) ? LINK_STATE_UP : + LINK_STATE_DOWN; + mac_link_update(bgep->mh, bgep->link_state); mutex_exit(bgep->genlock); return (0); } @@ -601,7 +644,7 @@ bge_unicst_set(void *arg, const uint8_t *macaddr, int slot) { bge_t *bgep = arg; /* private device info */ - BGE_TRACE(("bge_m_unicst_set($%p, %s)", arg, + BGE_TRACE(("bge_unicst_set($%p, %s)", arg, ether_sprintf((void *)macaddr))); /* * Remember the new current address in the driver state @@ -668,7 +711,7 @@ bge_unicst_set(void *arg, const uint8_t *macaddr, int slot) } } #endif - BGE_DEBUG(("bge_m_unicst_set($%p) done", arg)); + BGE_DEBUG(("bge_unicst_set($%p) done", arg)); if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); mutex_exit(bgep->genlock); @@ -1244,6 +1287,21 @@ bge_priv_propinfo(const char *pr_name, mac_prop_info_handle_t mph) mac_prop_info_set_default_str(mph, valstr); } + +static int +bge_m_unicst(void * arg, const uint8_t * mac_addr) +{ + bge_t *bgep = arg; + int i; + + /* XXX sets the mac address for all ring slots... OK? */ + for (i = 0; i < MIN(bgep->chipid.rx_rings, MAC_ADDRESS_REGS_MAX); i++) + bge_addmac(&bgep->recv[i], mac_addr); + + return (0); +} + + /* * Compute the index of the required bit in the multicast hash map. * This must mirror the way the hardware actually does it! @@ -1404,6 +1462,37 @@ bge_m_promisc(void *arg, boolean_t on) return (0); } +#ifdef MC_RESOURCES + +static void +bge_blank(void * arg, time_t tick_cnt, uint_t pkt_cnt) +{ + (void)arg; + (void)tick_cnt; + (void)pkt_cnt; +} + +static void +bge_m_resources(void * arg) +{ + bge_t *bgep = arg; + mac_rx_fifo_t mrf; + int i; + + mrf.mrf_type = MAC_RX_FIFO; + mrf.mrf_blank = bge_blank; + mrf.mrf_arg = (void *)bgep; + mrf.mrf_normal_blank_time = 25; + mrf.mrf_normal_pkt_count = 8; + + for (i = 0; i < BGE_RECV_RINGS_MAX; i++) { + bgep->macRxResourceHandles[i] = + mac_resource_add(bgep->mh, (mac_resource_t *)&mrf); + } +} + +#endif /* MC_RESOURCES */ + /* * Find the slot for the specified unicast address */ @@ -1427,7 +1516,7 @@ bge_unicst_find(bge_t *bgep, const uint8_t *mac_addr) * specified ring 'arg'. 
*/ static int -bge_addmac(void *arg, const uint8_t *mac_addr) +bge_addmac(void *arg, const uint8_t * mac_addr) { recv_ring_t *rrp = (recv_ring_t *)arg; bge_t *bgep = rrp->bgep; @@ -1579,8 +1668,9 @@ bge_remmac(void *arg, const uint8_t *mac_addr) return (0); } + static int -bge_flag_intr_enable(mac_intr_handle_t ih) +bge_flag_intr_enable(mac_ring_driver_t ih) { recv_ring_t *rrp = (recv_ring_t *)ih; bge_t *bgep = rrp->bgep; @@ -1593,7 +1683,7 @@ bge_flag_intr_enable(mac_intr_handle_t ih) } static int -bge_flag_intr_disable(mac_intr_handle_t ih) +bge_flag_intr_disable(mac_ring_driver_t ih) { recv_ring_t *rrp = (recv_ring_t *)ih; bge_t *bgep = rrp->bgep; @@ -1645,9 +1735,8 @@ bge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index, infop->mri_stat = bge_rx_ring_stat; mintr = &infop->mri_intr; - mintr->mi_handle = (mac_intr_handle_t)rx_ring; - mintr->mi_enable = bge_flag_intr_enable; - mintr->mi_disable = bge_flag_intr_disable; + mintr->mi_enable = (mac_intr_enable_t)bge_flag_intr_enable; + mintr->mi_disable = (mac_intr_disable_t)bge_flag_intr_disable; break; } @@ -1666,7 +1755,7 @@ bge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index, */ void bge_fill_group(void *arg, mac_ring_type_t rtype, const int rg_index, - mac_group_info_t *infop, mac_group_handle_t gh) + mac_group_info_t * infop, mac_group_handle_t gh) { bge_t *bgep = arg; @@ -1694,11 +1783,13 @@ bge_fill_group(void *arg, mac_ring_type_t rtype, const int rg_index, } } + /*ARGSUSED*/ static boolean_t bge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) { bge_t *bgep = arg; + mac_capab_rings_t *cap_rings; switch (cap) { case MAC_CAPAB_HCKSUM: { @@ -1707,26 +1798,30 @@ bge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) *txflags = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM; break; } - case MAC_CAPAB_RINGS: { - mac_capab_rings_t *cap_rings = cap_data; + + case MAC_CAPAB_RINGS: + cap_rings = (mac_capab_rings_t *)cap_data; /* Temporarily disable multiple tx rings. */ if (cap_rings->mr_type != MAC_RING_TYPE_RX) return (B_FALSE); cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC; - cap_rings->mr_rnum = cap_rings->mr_gnum = + cap_rings->mr_rnum = + cap_rings->mr_gnum = MIN(bgep->chipid.rx_rings, MAC_ADDRESS_REGS_MAX); cap_rings->mr_rget = bge_fill_ring; cap_rings->mr_gget = bge_fill_group; break; - } + default: return (B_FALSE); } return (B_TRUE); } +#ifdef NOT_SUPPORTED_XXX + /* * Loopback ioctl code */ @@ -1827,6 +1922,8 @@ bge_loop_ioctl(bge_t *bgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp) } } +#endif /* NOT_SUPPORTED_XXX */ + /* * Specific bge IOCTLs, the gld module handles the generic ones. 
*/ @@ -1866,6 +1963,7 @@ bge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) case BGE_HARD_RESET: break; +#ifdef NOT_SUPPORTED_XXX case LB_GET_INFO_SIZE: case LB_GET_INFO: case LB_GET_MODE: @@ -1873,6 +1971,7 @@ bge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) /* FALLTHRU */ case LB_SET_MODE: break; +#endif } @@ -1916,12 +2015,14 @@ bge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) status = bge_chip_ioctl(bgep, wq, mp, iocp); break; +#ifdef NOT_SUPPORTED_XXX case LB_GET_INFO_SIZE: case LB_GET_INFO: case LB_GET_MODE: case LB_SET_MODE: status = bge_loop_ioctl(bgep, wq, mp, iocp); break; +#endif } @@ -1994,7 +2095,7 @@ bge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) */ #undef BGE_DBG -#define BGE_DBG BGE_DBG_INIT /* debug flag for this code */ +#define BGE_DBG BGE_DBG_MEM /* debug flag for this code */ /* * Allocate an area of memory and a DMA handle for accessing it */ @@ -2588,8 +2689,11 @@ bge_alloc_bufs(bge_t *bgep) /* * Enable PCI relaxed ordering only for RX/TX data buffers */ - if (bge_relaxed_ordering) - dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING; + if (!(DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep))) { + if (bge_relaxed_ordering) + dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING; + } /* * Allocate memory & handles for RX buffers @@ -2602,6 +2706,9 @@ bge_alloc_bufs(bge_t *bgep) if (err != DDI_SUCCESS) return (DDI_FAILURE); } + BGE_DEBUG(("DMA ALLOC: allocated %d chunks for Rx Buffers (rxbuffsize = %d)", + rxbuffsize/BGE_SPLIT, + rxbuffsize)); /* * Allocate memory & handles for TX buffers @@ -2614,8 +2721,15 @@ bge_alloc_bufs(bge_t *bgep) if (err != DDI_SUCCESS) return (DDI_FAILURE); } + BGE_DEBUG(("DMA ALLOC: allocated %d chunks for Tx Buffers (txbuffsize = %d)", + txbuffsize/BGE_SPLIT, + txbuffsize)); - dma_attr.dma_attr_flags &= ~DDI_DMA_RELAXED_ORDERING; + if (!(DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep))) { + /* no relaxed ordering for descriptors rings? */ + dma_attr.dma_attr_flags &= ~DDI_DMA_RELAXED_ORDERING; + } /* * Allocate memory & handles for receive return rings @@ -2628,14 +2742,21 @@ bge_alloc_bufs(bge_t *bgep) if (err != DDI_SUCCESS) return (DDI_FAILURE); } + BGE_DEBUG(("DMA ALLOC: allocated %d chunks for Rx Descs cons (rx_rings = %d, rxdescsize = %d)", + rxdescsize/rx_rings, + rx_rings, + rxdescsize)); /* - * Allocate memory & handles for buffer (producer) descriptor rings + * Allocate memory & handles for buffer (producer) descriptor rings. + * Note that split=rx_rings. */ err = bge_alloc_dma_mem(bgep, rxbuffdescsize, &bge_desc_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->rx_desc[split]); if (err != DDI_SUCCESS) return (DDI_FAILURE); + BGE_DEBUG(("DMA ALLOC: allocated 1 chunks for Rx Descs prod (rxbuffdescsize = %d)", + rxdescsize)); /* * Allocate memory & handles for TX descriptor rings, @@ -2645,65 +2766,260 @@ bge_alloc_bufs(bge_t *bgep) DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->tx_desc); if (err != DDI_SUCCESS) return (DDI_FAILURE); + BGE_DEBUG(("DMA ALLOC: allocated 1 chunks for Tx Descs / Status Block / Stats (txdescdize = %d)", + txdescsize)); /* * Now carve up each of the allocated areas ... 
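
The carving below hands slices of the big DMA allocations to the individual buffer and descriptor rings, and the new BGE_DEBUG output prints each slice's address, length and offset. bge_slice_chunk() itself is not shown in this diff, but the debug output and the ASSERT(area.alength == 0) further down suggest it simply consumes nslots * slotlen bytes from the front of the parent area. A loose stand-alone model under that assumption; the structure, field names and sizes are illustrative, not the driver's dma_area_t:

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

/* simplified stand-in for a DMA area: one physically contiguous region */
typedef struct {
	uint64_t pa;		/* cookie.dmac_laddress */
	size_t   alength;	/* bytes remaining in the area */
	size_t   offset;	/* bytes already handed out */
} dma_area_t;

/* model of bge_slice_chunk(): hand out nslots*slotlen bytes off the front */
static void
slice_chunk(dma_area_t *slice, dma_area_t *area, size_t nslots, size_t slotlen)
{
	size_t len = nslots * slotlen;

	assert(area->alength >= len);
	slice->pa = area->pa + area->offset;	/* physical start of slice */
	slice->alength = len;
	slice->offset = area->offset;		/* position within parent */

	area->offset += len;			/* advance the parent */
	area->alength -= len;
}

int
main(void)
{
	dma_area_t area = { 0x100000, 512 * 2048 + 256 * 96, 0 };
	dma_area_t std, desc;

	slice_chunk(&std, &area, 512, 2048);	/* e.g. standard rx buffers */
	slice_chunk(&desc, &area, 256, 96);	/* e.g. a descriptor ring */

	printf("std slice: pa=0x%llx len=%zu\n",
	    (unsigned long long)std.pa, std.alength);
	printf("remaining %zu bytes\n", area.alength);
	return (0);
}

For the sample sizes the two slices consume the whole area, mirroring the ASSERT that closes the producer-descriptor carving.
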
*/ + + /* rx buffers */ for (split = 0; split < BGE_SPLIT; ++split) { area = bgep->rx_buff[split]; + + BGE_DEBUG(("RXB CHNK %d INIT: va=%p alen=%d off=%d pa=%llx psz=%d", + split, + area.mem_va, + area.alength, + area.offset, + area.cookie.dmac_laddress, + area.cookie.dmac_size)); + bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].buf[split], &area, BGE_STD_SLOTS_USED/BGE_SPLIT, bgep->chipid.std_buf_size); + + BGE_DEBUG(("RXB SLCE %d STND: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", + split, + bgep->buff[BGE_STD_BUFF_RING].buf[split].mem_va, + bgep->buff[BGE_STD_BUFF_RING].buf[split].alength, + bgep->buff[BGE_STD_BUFF_RING].buf[split].offset, + bgep->buff[BGE_STD_BUFF_RING].buf[split].cookie.dmac_laddress, + bgep->buff[BGE_STD_BUFF_RING].buf[split].cookie.dmac_size, + BGE_STD_SLOTS_USED/BGE_SPLIT, + bgep->chipid.std_buf_size)); + bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].buf[split], &area, bgep->chipid.jumbo_slots/BGE_SPLIT, bgep->chipid.recv_jumbo_size); + + if ((bgep->chipid.jumbo_slots / BGE_SPLIT) > 0) + { + BGE_DEBUG(("RXB SLCE %d JUMB: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", + split, + bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].mem_va, + bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].alength, + bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].offset, + bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].cookie.dmac_laddress, + bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].cookie.dmac_size, + bgep->chipid.jumbo_slots/BGE_SPLIT, + bgep->chipid.recv_jumbo_size)); + } + bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].buf[split], &area, BGE_MINI_SLOTS_USED/BGE_SPLIT, BGE_MINI_BUFF_SIZE); + + if ((BGE_MINI_SLOTS_USED / BGE_SPLIT) > 0) + { + BGE_DEBUG(("RXB SLCE %d MINI: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", + split, + bgep->buff[BGE_MINI_BUFF_RING].buf[split].mem_va, + bgep->buff[BGE_MINI_BUFF_RING].buf[split].alength, + bgep->buff[BGE_MINI_BUFF_RING].buf[split].offset, + bgep->buff[BGE_MINI_BUFF_RING].buf[split].cookie.dmac_laddress, + bgep->buff[BGE_MINI_BUFF_RING].buf[split].cookie.dmac_size, + BGE_MINI_SLOTS_USED/BGE_SPLIT, + BGE_MINI_BUFF_SIZE)); + } + + BGE_DEBUG(("RXB CHNK %d DONE: va=%p alen=%d off=%d pa=%llx psz=%d", + split, + area.mem_va, + area.alength, + area.offset, + area.cookie.dmac_laddress, + area.cookie.dmac_size)); } + /* tx buffers */ for (split = 0; split < BGE_SPLIT; ++split) { area = bgep->tx_buff[split]; - for (ring = 0; ring < tx_rings; ++ring) + + BGE_DEBUG(("TXB CHNK %d INIT: va=%p alen=%d off=%d pa=%llx psz=%d", + split, + area.mem_va, + area.alength, + area.offset, + area.cookie.dmac_laddress, + area.cookie.dmac_size)); + + for (ring = 0; ring < tx_rings; ++ring) { bge_slice_chunk(&bgep->send[ring].buf[0][split], &area, BGE_SEND_BUF_NUM/BGE_SPLIT, bgep->chipid.snd_buff_size); - for (; ring < BGE_SEND_RINGS_MAX; ++ring) + + BGE_DEBUG(("TXB SLCE %d RING %d: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", + split, ring, + bgep->send[ring].buf[0][split].mem_va, + bgep->send[ring].buf[0][split].alength, + bgep->send[ring].buf[0][split].offset, + bgep->send[ring].buf[0][split].cookie.dmac_laddress, + bgep->send[ring].buf[0][split].cookie.dmac_size, + BGE_SEND_BUF_NUM/BGE_SPLIT, + bgep->chipid.snd_buff_size)); + } + + for (; ring < BGE_SEND_RINGS_MAX; ++ring) { bge_slice_chunk(&bgep->send[ring].buf[0][split], &area, 0, bgep->chipid.snd_buff_size); + } + + BGE_DEBUG(("TXB CHNK %d DONE: va=%p alen=%d off=%d pa=%llx psz=%d", + split, + area.mem_va, + area.alength, + area.offset, + area.cookie.dmac_laddress, + 
area.cookie.dmac_size)); } - for (ring = 0; ring < rx_rings; ++ring) + for (ring = 0; ring < rx_rings; ++ring) { bge_slice_chunk(&bgep->recv[ring].desc, &bgep->rx_desc[ring], bgep->chipid.recv_slots, sizeof (bge_rbd_t)); - area = bgep->rx_desc[rx_rings]; - for (; ring < BGE_RECV_RINGS_MAX; ++ring) + BGE_DEBUG(("RXD CONS RING %d: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", + ring, + bgep->recv[ring].desc.mem_va, + bgep->recv[ring].desc.alength, + bgep->recv[ring].desc.offset, + bgep->recv[ring].desc.cookie.dmac_laddress, + bgep->recv[ring].desc.cookie.dmac_size, + bgep->chipid.recv_slots, + sizeof(bge_rbd_t))); + } + + /* dma alloc for rxbuffdescsize is located at bgep->rx_desc[#rings] */ + area = bgep->rx_desc[rx_rings]; /* note rx_rings = one beyond rings */ + + for (; ring < BGE_RECV_RINGS_MAX; ++ring) /* skip unused rings */ bge_slice_chunk(&bgep->recv[ring].desc, &area, 0, sizeof (bge_rbd_t)); + + BGE_DEBUG(("RXD PROD INIT: va=%p alen=%d off=%d pa=%llx psz=%d", + area.mem_va, + area.alength, + area.offset, + area.cookie.dmac_laddress, + area.cookie.dmac_size)); + bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].desc, &area, BGE_STD_SLOTS_USED, sizeof (bge_rbd_t)); + BGE_DEBUG(("RXD PROD STND: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", + bgep->buff[BGE_STD_BUFF_RING].desc.mem_va, + bgep->buff[BGE_STD_BUFF_RING].desc.alength, + bgep->buff[BGE_STD_BUFF_RING].desc.offset, + bgep->buff[BGE_STD_BUFF_RING].desc.cookie.dmac_laddress, + bgep->buff[BGE_STD_BUFF_RING].desc.cookie.dmac_size, + BGE_STD_SLOTS_USED, + sizeof(bge_rbd_t))); + bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].desc, &area, bgep->chipid.jumbo_slots, sizeof (bge_rbd_t)); + BGE_DEBUG(("RXD PROD JUMB: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", + bgep->buff[BGE_JUMBO_BUFF_RING].desc.mem_va, + bgep->buff[BGE_JUMBO_BUFF_RING].desc.alength, + bgep->buff[BGE_JUMBO_BUFF_RING].desc.offset, + bgep->buff[BGE_JUMBO_BUFF_RING].desc.cookie.dmac_laddress, + bgep->buff[BGE_JUMBO_BUFF_RING].desc.cookie.dmac_size, + bgep->chipid.jumbo_slots, + sizeof(bge_rbd_t))); + bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].desc, &area, BGE_MINI_SLOTS_USED, sizeof (bge_rbd_t)); + BGE_DEBUG(("RXD PROD MINI: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", + bgep->buff[BGE_MINI_BUFF_RING].desc.mem_va, + bgep->buff[BGE_MINI_BUFF_RING].desc.alength, + bgep->buff[BGE_MINI_BUFF_RING].desc.offset, + bgep->buff[BGE_MINI_BUFF_RING].desc.cookie.dmac_laddress, + bgep->buff[BGE_MINI_BUFF_RING].desc.cookie.dmac_size, + BGE_MINI_SLOTS_USED, + sizeof(bge_rbd_t))); + + BGE_DEBUG(("RXD PROD DONE: va=%p alen=%d off=%d pa=%llx psz=%d", + area.mem_va, + area.alength, + area.offset, + area.cookie.dmac_laddress, + area.cookie.dmac_size)); + ASSERT(area.alength == 0); area = bgep->tx_desc; - for (ring = 0; ring < tx_rings; ++ring) + + BGE_DEBUG(("TXD INIT: va=%p alen=%d off=%d pa=%llx psz=%d", + area.mem_va, + area.alength, + area.offset, + area.cookie.dmac_laddress, + area.cookie.dmac_size)); + + for (ring = 0; ring < tx_rings; ++ring) { bge_slice_chunk(&bgep->send[ring].desc, &area, BGE_SEND_SLOTS_USED, sizeof (bge_sbd_t)); - for (; ring < BGE_SEND_RINGS_MAX; ++ring) + + BGE_DEBUG(("TXD RING %d: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", + ring, + bgep->send[ring].desc.mem_va, + bgep->send[ring].desc.alength, + bgep->send[ring].desc.offset, + bgep->send[ring].desc.cookie.dmac_laddress, + bgep->send[ring].desc.cookie.dmac_size, + BGE_SEND_SLOTS_USED, + sizeof(bge_sbd_t))); + } + + for (; ring < 
BGE_SEND_RINGS_MAX; ++ring) /* skip unused rings */ bge_slice_chunk(&bgep->send[ring].desc, &area, 0, sizeof (bge_sbd_t)); + bge_slice_chunk(&bgep->statistics, &area, 1, sizeof (bge_statistics_t)); + BGE_DEBUG(("TXD STATISTICS: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", + bgep->statistics.mem_va, + bgep->statistics.alength, + bgep->statistics.offset, + bgep->statistics.cookie.dmac_laddress, + bgep->statistics.cookie.dmac_size, + 1, + sizeof(bge_statistics_t))); + bge_slice_chunk(&bgep->status_block, &area, 1, sizeof (bge_status_t)); + BGE_DEBUG(("TXD STATUS BLOCK: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", + bgep->status_block.mem_va, + bgep->status_block.alength, + bgep->status_block.offset, + bgep->status_block.cookie.dmac_laddress, + bgep->status_block.cookie.dmac_size, + 1, + sizeof(bge_status_t))); + + BGE_DEBUG(("TXD DONE: va=%p alen=%d off=%d pa=%llx psz=%d", + area.mem_va, + area.alength, + area.offset, + area.cookie.dmac_laddress, + area.cookie.dmac_size)); + ASSERT(area.alength == BGE_STATUS_PADDING); + DMA_ZERO(bgep->status_block); return (DDI_SUCCESS); } +#undef BGE_DBG +#define BGE_DBG BGE_DBG_INIT /* debug flag for this code */ + /* * This routine frees the transmit and receive buffers and descriptors. * Make sure the chip is stopped before calling it! @@ -2840,7 +3156,6 @@ bge_find_mac_address(bge_t *bgep, chip_id_t *cidp) cidp->vendor_addr.set ? "" : "not ")); } - /*ARGSUSED*/ int bge_check_acc_handle(bge_t *bgep, ddi_acc_handle_t handle) @@ -2961,6 +3276,7 @@ bge_unattach(bge_t *bgep) ddi_periodic_delete(bgep->periodic_id); bgep->periodic_id = NULL; } + if (bgep->progress & PROGRESS_KSTATS) bge_fini_kstats(bgep); if (bgep->progress & PROGRESS_PHY) @@ -3008,8 +3324,11 @@ bge_unattach(bge_t *bgep) ddi_remove_softintr(bgep->drain_id); if (bgep->progress & PROGRESS_BUFS) bge_free_bufs(bgep); - if (bgep->progress & PROGRESS_REGS) + if (bgep->progress & PROGRESS_REGS) { ddi_regs_map_free(&bgep->io_handle); + if (bgep->ape_enabled) + ddi_regs_map_free(&bgep->ape_handle); + } if (bgep->progress & PROGRESS_CFG) pci_config_teardown(&bgep->cfg_handle); @@ -3093,6 +3412,209 @@ bge_resume(dev_info_t *devinfo) return (DDI_SUCCESS); } +static int +bge_fw_img_is_valid(bge_t *bgep, uint32_t offset) +{ + uint32_t val; + + if (bge_nvmem_read32(bgep, offset, &val) || + (val & 0xfc000000) != 0x0c000000 || + bge_nvmem_read32(bgep, offset + 4, &val) || + val != 0) + return (0); + + return (1); +} + +static void +bge_read_mgmtfw_ver(bge_t *bgep) +{ + uint32_t val; + uint32_t offset; + uint32_t start; + int i, vlen; + + for (offset = NVM_DIR_START; + offset < NVM_DIR_END; + offset += NVM_DIRENT_SIZE) { + if (bge_nvmem_read32(bgep, offset, &val)) + return; + + if ((val >> NVM_DIRTYPE_SHIFT) == NVM_DIRTYPE_ASFINI) + break; + } + + if (offset == NVM_DIR_END) + return; + + if (bge_nvmem_read32(bgep, offset - 4, &start)) + return; + + if (bge_nvmem_read32(bgep, offset + 4, &offset) || + !bge_fw_img_is_valid(bgep, offset) || + bge_nvmem_read32(bgep, offset + 8, &val)) + return; + + offset += val - start; + + vlen = strlen(bgep->fw_version); + + bgep->fw_version[vlen++] = ','; + bgep->fw_version[vlen++] = ' '; + + for (i = 0; i < 4; i++) { + uint32_t v; + + if (bge_nvmem_read32(bgep, offset, &v)) + return; + + v = BE_32(v); + + offset += sizeof(v); + + if (vlen > BGE_FW_VER_SIZE - sizeof(v)) { + memcpy(&bgep->fw_version[vlen], &v, BGE_FW_VER_SIZE - vlen); + break; + } + + memcpy(&bgep->fw_version[vlen], &v, sizeof(v)); + vlen += sizeof(v); + } +} + +static void 
+bge_read_dash_ver(bge_t *bgep) +{ + int vlen; + uint32_t apedata; + char *fwtype; + + if (!bgep->ape_enabled || !bgep->asf_enabled) + return; + + apedata = bge_ape_get32(bgep, BGE_APE_SEG_SIG); + if (apedata != APE_SEG_SIG_MAGIC) + return; + + apedata = bge_ape_get32(bgep, BGE_APE_FW_STATUS); + if (!(apedata & APE_FW_STATUS_READY)) + return; + + apedata = bge_ape_get32(bgep, BGE_APE_FW_VERSION); + + if (bge_ape_get32(bgep, BGE_APE_FW_FEATURES) & + BGE_APE_FW_FEATURE_NCSI) { + bgep->ape_has_ncsi = B_TRUE; + fwtype = "NCSI"; + } else if ((bgep->chipid.device == DEVICE_ID_5725) || + (bgep->chipid.device == DEVICE_ID_5727)) { + fwtype = "SMASH"; + } else { + fwtype = "DASH"; + } + + vlen = strlen(bgep->fw_version); + + snprintf(&bgep->fw_version[vlen], BGE_FW_VER_SIZE - vlen, + " %s v%d.%d.%d.%d", fwtype, + (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT, + (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT, + (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT, + (apedata & APE_FW_VERSION_BLDMSK)); +} + +static void +bge_read_bc_ver(bge_t *bgep) +{ + uint32_t val; + uint32_t offset; + uint32_t start; + uint32_t ver_offset; + int i, dst_off; + uint32_t major; + uint32_t minor; + boolean_t newver = B_FALSE; + + if (bge_nvmem_read32(bgep, 0xc, &offset) || + bge_nvmem_read32(bgep, 0x4, &start)) + return; + + if (bge_nvmem_read32(bgep, offset, &val)) + return; + + if ((val & 0xfc000000) == 0x0c000000) { + if (bge_nvmem_read32(bgep, offset + 4, &val)) + return; + + if (val == 0) + newver = B_TRUE; + } + + dst_off = strlen(bgep->fw_version); + + if (newver) { + if (((BGE_FW_VER_SIZE - dst_off) < 16) || + bge_nvmem_read32(bgep, offset + 8, &ver_offset)) + return; + + offset = offset + ver_offset - start; + for (i = 0; i < 16; i += 4) { + if (bge_nvmem_read32(bgep, offset + i, &val)) + return; + val = BE_32(val); + memcpy(bgep->fw_version + dst_off + i, &val, + sizeof(val)); + } + } else { + if (bge_nvmem_read32(bgep, NVM_PTREV_BCVER, &ver_offset)) + return; + + major = (ver_offset & NVM_BCVER_MAJMSK) >> NVM_BCVER_MAJSFT; + minor = ver_offset & NVM_BCVER_MINMSK; + snprintf(&bgep->fw_version[dst_off], BGE_FW_VER_SIZE - dst_off, + "v%d.%02d", major, minor); + } +} + +static void +bge_read_fw_ver(bge_t *bgep) +{ + uint32_t val; + uint32_t magic; + + *bgep->fw_version = 0; + + if ((bgep->chipid.nvtype == BGE_NVTYPE_NONE) || + (bgep->chipid.nvtype == BGE_NVTYPE_UNKNOWN)) { + snprintf(bgep->fw_version, sizeof(bgep->fw_version), "sb"); + return; + } + + mutex_enter(bgep->genlock); + + bge_nvmem_read32(bgep, 0, &magic); + + if (magic == EEPROM_MAGIC) { + bge_read_bc_ver(bgep); + } else { + /* ignore other configs for now */ + mutex_exit(bgep->genlock); + return; + } + + if (bgep->ape_enabled) { + if (bgep->asf_enabled) { + bge_read_dash_ver(bgep); + } + } else if (bgep->asf_enabled) { + bge_read_mgmtfw_ver(bgep); + } + + mutex_exit(bgep->genlock); + + bgep->fw_version[BGE_FW_VER_SIZE - 1] = 0; /* safety */ +} + /* * attach(9E) -- Attach a device to the system * @@ -3108,6 +3630,10 @@ bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) int instance; int err; int intr_types; + int *props = NULL; + uint_t numProps; + uint32_t regval; + uint32_t pci_state_reg; #ifdef BGE_IPMI_ASF uint32_t mhcrValue; #ifdef __sparc @@ -3209,15 +3735,20 @@ bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) * has been set in PCI_CONF_COMM already, we need to write the * byte-swapped value to it. So we just write zero first for simplicity. 
*/ - if (DEVICE_5717_SERIES_CHIPSETS(bgep)) + if (DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, 0); +#else + mhcrValue = MHCR_ENABLE_INDIRECT_ACCESS | + MHCR_ENABLE_TAGGED_STATUS_MODE | + MHCR_MASK_INTERRUPT_MODE | + MHCR_MASK_PCI_INT_OUTPUT | + MHCR_CLEAR_INTERRUPT_INTA; +#endif pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcrValue); bge_ind_put32(bgep, MEMORY_ARBITER_MODE_REG, bge_ind_get32(bgep, MEMORY_ARBITER_MODE_REG) | MEMORY_ARBITER_ENABLE); -#else - mhcrValue = pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_MHCR); -#endif if (mhcrValue & MHCR_ENABLE_ENDIAN_WORD_SWAP) { bgep->asf_wordswapped = B_TRUE; } else { @@ -3231,7 +3762,7 @@ bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) } bgep->progress |= PROGRESS_CFG; cidp = &bgep->chipid; - bzero(cidp, sizeof (*cidp)); + bzero(cidp, sizeof(*cidp)); bge_chip_cfg_init(bgep, cidp, B_FALSE); if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); @@ -3264,6 +3795,8 @@ bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) DDI_PROP_DONTPASS, rxrings_propname, cidp->rx_rings); cidp->tx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS, txrings_propname, cidp->tx_rings); + cidp->eee = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, + DDI_PROP_DONTPASS, eee_propname, cidp->eee); cidp->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS, default_mtu, BGE_DEFAULT_MTU); @@ -3282,6 +3815,36 @@ bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) goto attach_fail; } bgep->io_regs = regs; + + bgep->ape_enabled = B_FALSE; + bgep->ape_regs = NULL; + if (DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) { + err = ddi_regs_map_setup(devinfo, BGE_PCI_APEREGS_RNUMBER, + ®s, 0, 0, &bge_reg_accattr, &bgep->ape_handle); + if (err != DDI_SUCCESS) { + ddi_regs_map_free(&bgep->io_handle); + bge_problem(bgep, "ddi_regs_map_setup() failed"); + goto attach_fail; + } + bgep->ape_regs = regs; + bgep->ape_enabled = B_TRUE; + + /* + * Allow reads and writes to the + * APE register and memory space. 
+ */ + + pci_state_reg = pci_config_get32(bgep->cfg_handle, + PCI_CONF_BGE_PCISTATE); + pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR | + PCISTATE_ALLOW_APE_SHMEM_WR | PCISTATE_ALLOW_APE_PSPACE_WR; + pci_config_put32(bgep->cfg_handle, + PCI_CONF_BGE_PCISTATE, pci_state_reg); + + bge_ape_lock_init(bgep); + } + bgep->progress |= PROGRESS_REGS; /* @@ -3293,6 +3856,29 @@ bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) goto attach_fail; } + err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo, + 0, "reg", &props, &numProps); + if ((err == DDI_PROP_SUCCESS) && (numProps > 0)) { + bgep->pci_bus = PCI_REG_BUS_G(props[0]); + bgep->pci_dev = PCI_REG_DEV_G(props[0]); + bgep->pci_func = PCI_REG_FUNC_G(props[0]); + ddi_prop_free(props); + } + + if (DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) { + regval = bge_reg_get32(bgep, CPMU_STATUS_REG); + if ((bgep->chipid.device == DEVICE_ID_5719) || + (bgep->chipid.device == DEVICE_ID_5720)) { + bgep->pci_func = + ((regval & CPMU_STATUS_FUNC_NUM_5719) >> + CPMU_STATUS_FUNC_NUM_5719_SHIFT); + } else { + bgep->pci_func = ((regval & CPMU_STATUS_FUNC_NUM) >> + CPMU_STATUS_FUNC_NUM_SHIFT); + } + } + err = bge_alloc_bufs(bgep); if (err != DDI_SUCCESS) { bge_problem(bgep, "DMA buffer allocation failed"); @@ -3477,6 +4063,16 @@ bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) * Determine whether to override the chip's own MAC address */ bge_find_mac_address(bgep, cidp); + { + int slot; + for (slot = 0; slot < MAC_ADDRESS_REGS_MAX; slot++) { + ethaddr_copy(cidp->vendor_addr.addr, + bgep->curr_addr[slot].addr); + bgep->curr_addr[slot].set = 1; + } + } + + bge_read_fw_ver(bgep); bgep->unicst_addr_total = MAC_ADDRESS_REGS_MAX; bgep->unicst_addr_avail = MAC_ADDRESS_REGS_MAX; @@ -3492,7 +4088,10 @@ bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) macp->m_max_sdu = cidp->ethmax_size - sizeof (struct ether_header); macp->m_margin = VLAN_TAGSZ; macp->m_priv_props = bge_priv_prop; - macp->m_v12n = MAC_VIRT_LEVEL1; + +#if defined(ILLUMOS) + bge_m_unicst(bgep, cidp->vendor_addr.addr); +#endif /* * Finally, we're ready to register ourselves with the MAC layer diff --git a/usr/src/uts/common/io/bge/bge_mii.c b/usr/src/uts/common/io/bge/bge_mii.c index f24b6a3f16..68823b3cba 100644 --- a/usr/src/uts/common/io/bge/bge_mii.c +++ b/usr/src/uts/common/io/bge/bge_mii.c @@ -20,7 +20,13 @@ */ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010-2013, by Broadcom, Inc. + * All Rights Reserved. + */ + +/* + * Copyright (c) 2002, 2010, Oracle and/or its affiliates. + * All rights reserved. */ #include "bge_impl.h" @@ -143,6 +149,23 @@ bge_phydump(bge_t *bgep, uint16_t mii_status, uint16_t aux) #endif /* BGE_DEBUGGING */ +static void +bge_phy_toggle_auxctl_smdsp(bge_t *bgep, + boolean_t enable) +{ + uint16_t val; + + val = bge_mii_get16(bgep, MII_AUX_CONTROL); + + if (enable) { + val |= MII_AUX_CTRL_SMDSP_ENA; + } else { + val &= ~MII_AUX_CTRL_SMDSP_ENA; + } + + bge_mii_put16(bgep, MII_AUX_CONTROL, (val | MII_AUX_CTRL_TX_6DB)); +} + /* * Basic low-level function to probe for a PHY * @@ -153,6 +176,7 @@ bge_phy_probe(bge_t *bgep) { uint16_t miicfg; uint32_t nicsig, niccfg; + int i; BGE_TRACE(("bge_phy_probe($%p)", (void *)bgep)); @@ -174,8 +198,10 @@ bge_phy_probe(bge_t *bgep) * order to clear any sticky bits (but they should * have been cleared by the RESET, I think). 
*/ - miicfg = bge_mii_get16(bgep, MII_STATUS); - miicfg = bge_mii_get16(bgep, MII_STATUS); + for (i = 0; i < 100; i++) { + drv_usecwait(40); + miicfg = bge_mii_get16(bgep, MII_STATUS); + } BGE_DEBUG(("bge_phy_probe: status 0x%x", miicfg)); /* @@ -190,7 +216,7 @@ bge_phy_probe(bge_t *bgep) case 0xffff: return (B_FALSE); - default : + default: return (B_TRUE); } } @@ -527,7 +553,7 @@ bge_phy_bit_err_fix(bge_t *bgep) } /* - * End of Broadcom-derived workaround code * + * End of Broadcom-derived workaround code */ static int @@ -536,12 +562,13 @@ bge_restart_copper(bge_t *bgep, boolean_t powerdown) uint16_t phy_status; boolean_t reset_ok; uint16_t extctrl, auxctrl; + int i; BGE_TRACE(("bge_restart_copper($%p, %d)", (void *)bgep, powerdown)); ASSERT(mutex_owned(bgep->genlock)); - switch (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev)) { + switch (MHCR_CHIP_ASIC_REV(bgep)) { default: /* * Shouldn't happen; it means we don't recognise this chip. @@ -559,7 +586,7 @@ bge_restart_copper(bge_t *bgep, boolean_t powerdown) case MHCR_CHIP_ASIC_REV_5906: case MHCR_CHIP_ASIC_REV_5700: case MHCR_CHIP_ASIC_REV_5701: - case MHCR_CHIP_ASIC_REV_5723: + case MHCR_CHIP_ASIC_REV_5723: /* 5717 and 5725 series as well */ case MHCR_CHIP_ASIC_REV_5721_5751: /* * Just a plain reset; the "check" code breaks these chips @@ -588,7 +615,7 @@ bge_restart_copper(bge_t *bgep, boolean_t powerdown) break; } - switch (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev)) { + switch (MHCR_CHIP_ASIC_REV(bgep)) { case MHCR_CHIP_ASIC_REV_5705: case MHCR_CHIP_ASIC_REV_5721_5751: bge_phy_bit_err_fix(bgep); @@ -623,8 +650,10 @@ bge_restart_copper(bge_t *bgep, boolean_t powerdown) * order to clear any sticky bits (but they should * have been cleared by the RESET, I think). */ - phy_status = bge_mii_get16(bgep, MII_STATUS); - phy_status = bge_mii_get16(bgep, MII_STATUS); + for (i = 0; i < 100; i++) { + drv_usecwait(40); + phy_status = bge_mii_get16(bgep, MII_STATUS); + } BGE_DEBUG(("bge_restart_copper: status 0x%x", phy_status)); /* @@ -635,6 +664,232 @@ bge_restart_copper(bge_t *bgep, boolean_t powerdown) return (DDI_SUCCESS); } +boolean_t +bge_eee_cap(bge_t * bgep) +{ + if (!(DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep))) { + /* EEE is not supported on this chip */ + BGE_DEBUG(("bge_eee: eee not supported (device 0x%x)", + bgep->chipid.device)); + return (B_FALSE); + } + + switch (CHIP_ASIC_REV_PROD_ID(bgep)) { + case CHIP_ASIC_REV_5717_B0: /* = CHIP_ASIC_REV_5718_B0 */ + case CHIP_ASIC_REV_5717_C0: + /* case CHIP_ASIC_REV_5718_B0: */ + case CHIP_ASIC_REV_5719_A0: + case CHIP_ASIC_REV_5719_A1: + case CHIP_ASIC_REV_5720_A0: + case CHIP_ASIC_REV_5725_A0: + case CHIP_ASIC_REV_5727_B0: + return (B_TRUE); + + default: + /* EEE is not supported on this asic rev */ + BGE_DEBUG(("bge_eee: eee not supported (asic rev 0x%08x)", + bgep->chipid.asic_rev)); + return (B_FALSE); + } +} + +void +bge_eee_init(bge_t * bgep) +{ + uint32_t val; + + BGE_TRACE(("bge_eee_init($%p)", (void *)bgep)); + + ASSERT(mutex_owned(bgep->genlock)); + + if (!bge_eee_cap(bgep)) { + return; + } + + /* Enable MAC control of LPI */ + + val = (EEE_LINK_IDLE_PCIE_NL0 | EEE_LINK_IDLE_UART_IDL); + if (DEVICE_5725_SERIES_CHIPSETS(bgep)) + val |= EEE_LINK_IDLE_APE_TX_MT; + bge_reg_put32(bgep, EEE_LINK_IDLE_CONTROL_REG, val); + + bge_reg_put32(bgep, EEE_CONTROL_REG, EEE_CONTROL_EXIT_20_1_US); + + val = EEE_MODE_ERLY_L1_XIT_DET | EEE_MODE_LPI_IN_TX | + EEE_MODE_LPI_IN_RX | EEE_MODE_EEE_ENABLE; + + if (bgep->chipid.device != DEVICE_ID_5717) + val |= 
EEE_MODE_SND_IDX_DET_EN; + + //val |= EEE_MODE_APE_TX_DET_EN; + + if (!bgep->chipid.eee) { + val = 0; + } + + bge_reg_put32(bgep, EEE_MODE_REG, val); + + /* Set EEE timer debounce values */ + + bge_reg_put32(bgep, EEE_DEBOUNCE_T1_CONTROL_REG, + EEE_DEBOUNCE_T1_PCIEXIT_2047US | EEE_DEBOUNCE_T1_LNKIDLE_2047US); + + bge_reg_put32(bgep, EEE_DEBOUNCE_T2_CONTROL_REG, + EEE_DEBOUNCE_T2_APE_TX_2047US | EEE_DEBOUNCE_T2_TXIDXEQ_2047US); +} + +void +bge_eee_autoneg(bge_t * bgep, boolean_t adv_100fdx, boolean_t adv_1000fdx) +{ + uint32_t val; + uint16_t mii_val; + + BGE_TRACE(("bge_eee_autoneg($%p)", (void *)bgep)); + + ASSERT(mutex_owned(bgep->genlock)); + + if (!bge_eee_cap(bgep)) { + return; + } + + /* Disable LPI Requests */ + val = bge_reg_get32(bgep, EEE_MODE_REG); + val &= ~EEE_MODE_LPI_ENABLE; + bge_reg_put32(bgep, EEE_MODE_REG, val); + + bge_phy_toggle_auxctl_smdsp(bgep, B_TRUE); + + mii_val = 0; + + if (bgep->chipid.eee) { + if (adv_100fdx) { + mii_val |= EEE_CL45_D7_RESULT_STAT_LP_100TX; + } + if (adv_1000fdx) { + mii_val |= EEE_CL45_D7_RESULT_STAT_LP_1000T; + } + } + + /* Enable EEE advertisement for the specified mode(s)... */ + bge_mii_put16(bgep, MII_MMD_CTRL, MDIO_MMD_AN); + bge_mii_put16(bgep, MII_MMD_ADDRESS_DATA, MDIO_AN_EEE_ADV); + bge_mii_put16(bgep, MII_MMD_CTRL, + MII_MMD_CTRL_DATA_NOINC | MDIO_MMD_AN); + bge_mii_put16(bgep, MII_MMD_ADDRESS_DATA, mii_val); + + /* Setup PHY DSP for EEE */ + switch (bgep->chipid.device) { + case DEVICE_ID_5717: + case DEVICE_ID_5718: + case DEVICE_ID_5719: + /* If we advertised any EEE advertisements above... */ + if (mii_val) { + mii_val = (MII_DSP_TAP26_ALNOKO | + MII_DSP_TAP26_RMRXSTO | + MII_DSP_TAP26_OPCSINPT); + } + bge_phydsp_write(bgep, MII_DSP_TAP26, mii_val); + /* fall through */ + case DEVICE_ID_5720: + case DEVICE_ID_5725: + case DEVICE_ID_5727: + mii_val = bge_phydsp_read(bgep, MII_DSP_CH34TP2); + bge_phydsp_write(bgep, MII_DSP_CH34TP2, + (mii_val | MII_DSP_CH34TP2_HIBW01)); + } + + bge_phy_toggle_auxctl_smdsp(bgep, B_FALSE); +} + +void +bge_eee_adjust(bge_t * bgep) +{ + uint32_t val; + uint16_t mii_val; + + BGE_TRACE(("bge_eee_adjust($%p, %d)", (void *)bgep)); + + ASSERT(mutex_owned(bgep->genlock)); + + if (!bge_eee_cap(bgep)) { + return; + } + + bgep->eee_lpi_wait = 0; + + /* Check for PHY link status */ + if (bgep->param_link_up) { + BGE_DEBUG(("bge_eee_adjust: link status up")); + + /* + * XXX if duplex full and speed is 1000 or 100 then do the + * following... 
+ */ + + if (bgep->param_link_speed == 1000) { + BGE_DEBUG(("bge_eee_adjust: eee timing for 1000Mb")); + bge_reg_put32(bgep, EEE_CONTROL_REG, + EEE_CONTROL_EXIT_16_5_US); + } else if (bgep->param_link_speed == 100) { + BGE_DEBUG(("bge_eee_adjust: eee timing for 100Mb")); + bge_reg_put32(bgep, EEE_CONTROL_REG, + EEE_CONTROL_EXIT_36_US); + } + + /* Read PHY's EEE negotiation status */ + bge_mii_put16(bgep, MII_MMD_CTRL, MDIO_MMD_AN); + bge_mii_put16(bgep, MII_MMD_ADDRESS_DATA, + EEE_CL45_D7_RESULT_STAT); + bge_mii_put16(bgep, MII_MMD_CTRL, + MII_MMD_CTRL_DATA_NOINC | MDIO_MMD_AN); + mii_val = bge_mii_get16(bgep, MII_MMD_ADDRESS_DATA); + + /* Enable EEE LPI request if EEE negotiated */ + if ((mii_val == EEE_CL45_D7_RESULT_STAT_LP_1000T) || + (mii_val == EEE_CL45_D7_RESULT_STAT_LP_100TX)) { + BGE_DEBUG(("bge_eee_adjust: eee negotiaton success, lpi scheduled")); + bgep->eee_lpi_wait = 2; + } else { + BGE_DEBUG(("bge_eee_adjust: eee negotiation failed")); + } + } else { + BGE_DEBUG(("bge_eee_adjust: link status down")); + } + + if (!bgep->eee_lpi_wait) { + if (bgep->param_link_up) { + bge_phy_toggle_auxctl_smdsp(bgep, B_TRUE); + bge_phydsp_write(bgep, MII_DSP_TAP26, 0); + bge_phy_toggle_auxctl_smdsp(bgep, B_FALSE); + } + + /* Disable LPI requests */ + val = bge_reg_get32(bgep, EEE_MODE_REG); + val &= ~EEE_MODE_LPI_ENABLE; + bge_reg_put32(bgep, EEE_MODE_REG, val); + } +} + +void +bge_eee_enable(bge_t * bgep) +{ + uint32_t val; + + /* XXX check for EEE for 5717 family... */ + + if (bgep->param_link_speed == 1000) { + bge_phy_toggle_auxctl_smdsp(bgep, B_TRUE); + bge_phydsp_write(bgep, MII_DSP_TAP26, + MII_DSP_TAP26_ALNOKO | MII_DSP_TAP26_RMRXSTO); + bge_phy_toggle_auxctl_smdsp(bgep, B_FALSE); + } + + val = bge_reg_get32(bgep, EEE_MODE_REG); + val |= EEE_MODE_LPI_ENABLE; + bge_reg_put32(bgep, EEE_MODE_REG, val); +} + /* * Synchronise the (copper) PHY's speed/duplex/autonegotiation capabilities * and advertisements with the required settings as specified by the various @@ -866,6 +1121,10 @@ bge_update_copper(bge_t *bgep) break; } #endif /* BGE_COPPER_WIRESPEED */ + + /* enable EEE on those chips that support it */ + bge_eee_autoneg(bgep, adv_100fdx, adv_1000fdx); + return (DDI_SUCCESS); } @@ -877,13 +1136,17 @@ bge_check_copper(bge_t *bgep, boolean_t recheck) uint16_t aux; uint_t mode; boolean_t linkup; + int i; /* * Step 10: read the status from the PHY (which is self-clearing * on read!); also read & clear the main (Ethernet) MAC status * (the relevant bits of this are write-one-to-clear). */ - mii_status = bge_mii_get16(bgep, MII_STATUS); + for (i = 0; i < 100; i++) { + drv_usecwait(40); + mii_status = bge_mii_get16(bgep, MII_STATUS); + } emac_status = bge_reg_get32(bgep, ETHERNET_MAC_STATUS_REG); bge_reg_put32(bgep, ETHERNET_MAC_STATUS_REG, emac_status); @@ -897,14 +1160,19 @@ bge_check_copper(bge_t *bgep, boolean_t recheck) * we not forcing a recheck (i.e. the link state was already * known), there's nothing to do. 
*/ - if (mii_status == bgep->phy_gen_status && !recheck) + if (mii_status == bgep->phy_gen_status && !recheck) { + BGE_DEBUG(("bge_check_copper: no link change")); return (B_FALSE); + } do { /* * Step 11: read AUX STATUS register to find speed/duplex */ - aux = bge_mii_get16(bgep, MII_AUX_STATUS); + for (i = 0; i < 2000; i++) { + drv_usecwait(10); + aux = bge_mii_get16(bgep, MII_AUX_STATUS); + } BGE_CDB(bge_phydump, (bgep, mii_status, aux)); /* @@ -935,7 +1203,12 @@ bge_check_copper(bge_t *bgep, boolean_t recheck) */ bgep->phy_aux_status = aux; bgep->phy_gen_status = mii_status; - mii_status = bge_mii_get16(bgep, MII_STATUS); + + for (i = 0; i < 100; i++) + { + drv_usecwait(40); + mii_status = bge_mii_get16(bgep, MII_STATUS); + } } while (mii_status != bgep->phy_gen_status); /* @@ -1014,10 +1287,12 @@ bge_check_copper(bge_t *bgep, boolean_t recheck) bgep->param_link_duplex = bge_copper_link_duplex[mode]; } - BGE_DEBUG(("bge_check_copper: link now %s speed %d duplex %d", - UPORDOWN(bgep->param_link_up), - bgep->param_link_speed, - bgep->param_link_duplex)); + bge_eee_adjust(bgep); + + bge_log(bgep, "bge_check_copper: link now %s speed %d duplex %d", + UPORDOWN(bgep->param_link_up), + bgep->param_link_speed, + bgep->param_link_duplex); return (B_TRUE); } @@ -1054,13 +1329,13 @@ bge_restart_serdes(bge_t *bgep, boolean_t powerdown) * appropriately for the SerDes interface ... */ macmode = bge_reg_get32(bgep, ETHERNET_MAC_MODE_REG); - if (DEVICE_5714_SERIES_CHIPSETS(bgep)) { - macmode |= ETHERNET_MODE_LINK_POLARITY; - macmode &= ~ETHERNET_MODE_PORTMODE_MASK; + macmode &= ~ETHERNET_MODE_LINK_POLARITY; + macmode &= ~ETHERNET_MODE_PORTMODE_MASK; + if (DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep) || + DEVICE_5714_SERIES_CHIPSETS(bgep)) { macmode |= ETHERNET_MODE_PORTMODE_GMII; } else { - macmode &= ~ETHERNET_MODE_LINK_POLARITY; - macmode &= ~ETHERNET_MODE_PORTMODE_MASK; macmode |= ETHERNET_MODE_PORTMODE_TBI; } bge_reg_put32(bgep, ETHERNET_MAC_MODE_REG, macmode); @@ -1288,21 +1563,34 @@ bge_check_serdes(bge_t *bgep, boolean_t recheck) * to BCM5705, BCM5788, BCM5721, BCM5751, BCM5752, * BCM5714, and BCM5715 devices. */ - if (DEVICE_5714_SERIES_CHIPSETS(bgep)) { + if (DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep) || + DEVICE_5714_SERIES_CHIPSETS(bgep)) { tx_status = bge_reg_get32(bgep, TRANSMIT_MAC_STATUS_REG); linkup = BIS(tx_status, TRANSMIT_STATUS_LINK_UP); emac_status = bge_reg_get32(bgep, ETHERNET_MAC_STATUS_REG); bgep->serdes_status = emac_status; + /* clear write-one-to-clear bits in MAC status */ + if ((emac_status & ETHERNET_STATUS_MI_COMPLETE) && + (DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep))) { + emac_status |= ETHERNET_STATUS_SYNC_CHANGED | + ETHERNET_STATUS_CFG_CHANGED; + } + bge_reg_put32(bgep, + ETHERNET_MAC_STATUS_REG, emac_status); + /* + * If the link status has not changed then then + * break. If it has loop around and recheck again. + * Keep looping until the link status has not + * changed. 
+ */ if ((linkup && linkup_old) || (!linkup && !linkup_old)) { - emac_status &= ~ETHERNET_STATUS_LINK_CHANGED; - emac_status &= ~ETHERNET_STATUS_RECEIVING_CFG; break; } - emac_status |= ETHERNET_STATUS_LINK_CHANGED; - emac_status |= ETHERNET_STATUS_RECEIVING_CFG; if (linkup) linkup_old = B_TRUE; else @@ -1467,10 +1755,10 @@ bge_check_serdes(bge_t *bgep, boolean_t recheck) } bgep->link_state = LINK_STATE_UNKNOWN; - BGE_DEBUG(("bge_check_serdes: link now %s speed %d duplex %d", - UPORDOWN(bgep->param_link_up), - bgep->param_link_speed, - bgep->param_link_duplex)); + bge_log(bgep, "bge_check_serdes: link now %s speed %d duplex %d", + UPORDOWN(bgep->param_link_up), + bgep->param_link_speed, + bgep->param_link_duplex); return (B_TRUE); } @@ -1495,6 +1783,8 @@ static const phys_ops_t serdes_ops = { int bge_phys_init(bge_t *bgep) { + uint32_t regval; + BGE_TRACE(("bge_phys_init($%p)", (void *)bgep)); mutex_enter(bgep->genlock); @@ -1506,13 +1796,12 @@ bge_phys_init(bge_t *bgep) * BCM800x PHY. */ bgep->phy_mii_addr = 1; + if (DEVICE_5717_SERIES_CHIPSETS(bgep)) { - int regval = bge_reg_get32(bgep, CPMU_STATUS_REG); - if (regval & CPMU_STATUS_FUN_NUM) - bgep->phy_mii_addr += 1; + bgep->phy_mii_addr = (bgep->pci_func + 1); regval = bge_reg_get32(bgep, SGMII_STATUS_REG); if (regval & MEDIA_SELECTION_MODE) - bgep->phy_mii_addr += 7; + bgep->phy_mii_addr += 7; /* sgmii */ } if (bge_phy_probe(bgep)) { @@ -1606,18 +1895,15 @@ bge_phys_update(bge_t *bgep) boolean_t bge_phys_check(bge_t *bgep) { - int32_t orig_state; - boolean_t recheck; - BGE_TRACE(("bge_phys_check($%p)", (void *)bgep)); ASSERT(mutex_owned(bgep->genlock)); - orig_state = bgep->link_state; - recheck = orig_state == LINK_STATE_UNKNOWN; - recheck = (*bgep->physops->phys_check)(bgep, recheck); - if (!recheck) - return (B_FALSE); - - return (B_TRUE); + /* + * Force a link recheck if current state is unknown. + * phys_check() returns TRUE if the link status changed, + * FALSE otherwise. + */ + return ((*bgep->physops->phys_check)(bgep, + (bgep->link_state == LINK_STATE_UNKNOWN))); } diff --git a/usr/src/uts/common/io/bge/bge_send.c b/usr/src/uts/common/io/bge/bge_send.c index 28080d93af..87e0c0105d 100644 --- a/usr/src/uts/common/io/bge/bge_send.c +++ b/usr/src/uts/common/io/bge/bge_send.c @@ -20,7 +20,13 @@ */ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010-2013, by Broadcom, Inc. + * All Rights Reserved. + */ + +/* + * Copyright (c) 2002, 2010, Oracle and/or its affiliates. + * All rights reserved. */ #include "bge_impl.h" @@ -443,6 +449,11 @@ start_tx: hw_sbd_p->flags |= SBD_FLAG_IP_CKSUM; if (pktp->pflags & HCK_FULLCKSUM) hw_sbd_p->flags |= SBD_FLAG_TCP_UDP_CKSUM; + if (!(bgep->chipid.flags & CHIP_FLAG_NO_JUMBO) && + (DEVICE_5717_SERIES_CHIPSETS(bgep) || + DEVICE_5725_SERIES_CHIPSETS(bgep)) && + (txbuf->copy_len > ETHERMAX)) + hw_sbd_p->flags |= SBD_FLAG_JMB_PKT; hw_sbd_p->flags |= SBD_FLAG_PACKET_END; txfill_next = NEXT(txfill_next, BGE_SEND_BUF_MAX); diff --git a/usr/src/uts/common/io/e1000api/e1000_80003es2lan.c b/usr/src/uts/common/io/e1000api/e1000_80003es2lan.c index bdbb31cfdb..076e02be9b 100644 --- a/usr/src/uts/common/io/e1000api/e1000_80003es2lan.c +++ b/usr/src/uts/common/io/e1000api/e1000_80003es2lan.c @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2011, Intel Corporation + Copyright (c) 2001-2013, Intel Corporation All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -32,16 +32,12 @@ ******************************************************************************/ /*$FreeBSD$*/ -/* - * 80003ES2LAN Gigabit Ethernet Controller (Copper) +/* 80003ES2LAN Gigabit Ethernet Controller (Copper) * 80003ES2LAN Gigabit Ethernet Controller (Serdes) */ #include "e1000_api.h" -static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw); -static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw); -static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw); static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw); static void e1000_release_phy_80003es2lan(struct e1000_hw *hw); static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw); @@ -71,14 +67,12 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, u16 *data); static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, u16 data); -static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw); static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw); static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw); static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw); -/* - * A table for the GG82563 cable length where the range is defined +/* A table for the GG82563 cable length where the range is defined * with a lower bound at "index" and the upper bound at * "index + 5". */ @@ -95,13 +89,13 @@ static const u16 e1000_gg82563_cable_length_table[] = { static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; - s32 ret_val = E1000_SUCCESS; + s32 ret_val; DEBUGFUNC("e1000_init_phy_params_80003es2lan"); if (hw->phy.media_type != e1000_media_type_copper) { phy->type = e1000_phy_none; - goto out; + return E1000_SUCCESS; } else { phy->ops.power_up = e1000_power_up_phy_copper; phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan; @@ -133,12 +127,9 @@ static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw) ret_val = e1000_get_phy_id(hw); /* Verify phy id */ - if (phy->id != GG82563_E_PHY_ID) { - ret_val = -E1000_ERR_PHY; - goto out; - } + if (phy->id != GG82563_E_PHY_ID) + return -E1000_ERR_PHY; -out: return ret_val; } @@ -176,8 +167,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> E1000_EECD_SIZE_EX_SHIFT); - /* - * Added to a constant, "size" becomes the left-shift value + /* Added to a constant, "size" becomes the left-shift value * for setting word_size. */ size += NVM_WORD_SIZE_BASE_SHIFT; @@ -234,8 +224,8 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw) /* FWSM register */ mac->has_fwsm = TRUE; /* ARC supported; valid only if manageability features are enabled. */ - mac->arc_subsystem_valid = (E1000_READ_REG(hw, E1000_FWSM) & - E1000_FWSM_MODE_MASK) ? 
TRUE : FALSE; + mac->arc_subsystem_valid = !!(E1000_READ_REG(hw, E1000_FWSM) & + E1000_FWSM_MODE_MASK); /* Adaptive IFS not supported */ mac->adaptive_ifs = FALSE; @@ -377,14 +367,13 @@ static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw) ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); if (ret_val) - goto out; + return ret_val; ret_val = e1000_acquire_nvm_generic(hw); if (ret_val) e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); -out: return ret_val; } @@ -415,23 +404,20 @@ static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) u32 swfw_sync; u32 swmask = mask; u32 fwmask = mask << 16; - s32 ret_val = E1000_SUCCESS; - s32 i = 0, timeout = 50; + s32 i = 0; + s32 timeout = 50; DEBUGFUNC("e1000_acquire_swfw_sync_80003es2lan"); while (i < timeout) { - if (e1000_get_hw_semaphore_generic(hw)) { - ret_val = -E1000_ERR_SWFW_SYNC; - goto out; - } + if (e1000_get_hw_semaphore_generic(hw)) + return -E1000_ERR_SWFW_SYNC; swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); if (!(swfw_sync & (fwmask | swmask))) break; - /* - * Firmware currently using resource (fwmask) + /* Firmware currently using resource (fwmask) * or other software thread using resource (swmask) */ e1000_put_hw_semaphore_generic(hw); @@ -441,8 +427,7 @@ static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) if (i == timeout) { DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); - ret_val = -E1000_ERR_SWFW_SYNC; - goto out; + return -E1000_ERR_SWFW_SYNC; } swfw_sync |= swmask; @@ -450,8 +435,7 @@ static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) e1000_put_hw_semaphore_generic(hw); -out: - return ret_val; + return E1000_SUCCESS; } /** @@ -497,14 +481,13 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, ret_val = e1000_acquire_phy_80003es2lan(hw); if (ret_val) - goto out; + return ret_val; /* Select Configuration Page */ if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { page_select = GG82563_PHY_PAGE_SELECT; } else { - /* - * Use Alternative Page Select register to access + /* Use Alternative Page Select register to access * registers 30 and 31 */ page_select = GG82563_PHY_PAGE_SELECT_ALT; @@ -514,12 +497,11 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, ret_val = e1000_write_phy_reg_mdic(hw, page_select, temp); if (ret_val) { e1000_release_phy_80003es2lan(hw); - goto out; + return ret_val; } - if (hw->dev_spec._80003es2lan.mdic_wa_enable == TRUE) { - /* - * The "ready" bit in the MDIC register may be incorrectly set + if (hw->dev_spec._80003es2lan.mdic_wa_enable) { + /* The "ready" bit in the MDIC register may be incorrectly set * before the device has completed the "Page Select" MDI * transaction. So we wait 200us after each MDI command... 
*/ @@ -529,9 +511,8 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, ret_val = e1000_read_phy_reg_mdic(hw, page_select, &temp); if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { - ret_val = -E1000_ERR_PHY; e1000_release_phy_80003es2lan(hw); - goto out; + return -E1000_ERR_PHY; } usec_delay(200); @@ -549,7 +530,6 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, e1000_release_phy_80003es2lan(hw); -out: return ret_val; } @@ -572,14 +552,13 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, ret_val = e1000_acquire_phy_80003es2lan(hw); if (ret_val) - goto out; + return ret_val; /* Select Configuration Page */ if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { page_select = GG82563_PHY_PAGE_SELECT; } else { - /* - * Use Alternative Page Select register to access + /* Use Alternative Page Select register to access * registers 30 and 31 */ page_select = GG82563_PHY_PAGE_SELECT_ALT; @@ -589,12 +568,11 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, ret_val = e1000_write_phy_reg_mdic(hw, page_select, temp); if (ret_val) { e1000_release_phy_80003es2lan(hw); - goto out; + return ret_val; } - if (hw->dev_spec._80003es2lan.mdic_wa_enable == TRUE) { - /* - * The "ready" bit in the MDIC register may be incorrectly set + if (hw->dev_spec._80003es2lan.mdic_wa_enable) { + /* The "ready" bit in the MDIC register may be incorrectly set * before the device has completed the "Page Select" MDI * transaction. So we wait 200us after each MDI command... */ @@ -604,9 +582,8 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, ret_val = e1000_read_phy_reg_mdic(hw, page_select, &temp); if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { - ret_val = -E1000_ERR_PHY; e1000_release_phy_80003es2lan(hw); - goto out; + return -E1000_ERR_PHY; } usec_delay(200); @@ -624,7 +601,6 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, e1000_release_phy_80003es2lan(hw); -out: return ret_val; } @@ -655,7 +631,6 @@ static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset, static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw) { s32 timeout = PHY_CFG_TIMEOUT; - s32 ret_val = E1000_SUCCESS; u32 mask = E1000_NVM_CFG_DONE_PORT_0; DEBUGFUNC("e1000_get_cfg_done_80003es2lan"); @@ -671,12 +646,10 @@ static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw) } if (!timeout) { DEBUGOUT("MNG configuration cycle has not completed.\n"); - ret_val = -E1000_ERR_RESET; - goto out; + return -E1000_ERR_RESET; } -out: - return ret_val; + return E1000_SUCCESS; } /** @@ -688,33 +661,32 @@ out: **/ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) { - s32 ret_val = E1000_SUCCESS; + s32 ret_val; u16 phy_data; bool link; DEBUGFUNC("e1000_phy_force_speed_duplex_80003es2lan"); if (!(hw->phy.ops.read_reg)) - goto out; + return E1000_SUCCESS; - /* - * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI + /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI * forced whenever speed and duplex are forced. 
*/ ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); if (ret_val) - goto out; + return ret_val; phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_AUTO; ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_SPEC_CTRL, phy_data); if (ret_val) - goto out; + return ret_val; DEBUGOUT1("GG82563 PSCR: %X\n", phy_data); ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_data); if (ret_val) - goto out; + return ret_val; e1000_phy_force_speed_duplex_setup(hw, &phy_data); @@ -723,7 +695,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_data); if (ret_val) - goto out; + return ret_val; usec_delay(1); @@ -733,32 +705,30 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); if (ret_val) - goto out; + return ret_val; if (!link) { - /* - * We didn't get link. + /* We didn't get link. * Reset the DSP and cross our fingers. */ ret_val = e1000_phy_reset_dsp_generic(hw); if (ret_val) - goto out; + return ret_val; } /* Try once more */ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); if (ret_val) - goto out; + return ret_val; } ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data); if (ret_val) - goto out; + return ret_val; - /* - * Resetting the phy means we need to verify the TX_CLK corresponds + /* Resetting the phy means we need to verify the TX_CLK corresponds * to the link speed. 10Mbps -> 2.5MHz, else 25MHz. */ phy_data &= ~GG82563_MSCR_TX_CLK_MASK; @@ -767,15 +737,13 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) else phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25; - /* - * In addition, we must re-enable CRS on Tx for both half and full + /* In addition, we must re-enable CRS on Tx for both half and full * duplex. */ phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data); -out: return ret_val; } @@ -789,32 +757,29 @@ out: static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; - s32 ret_val = E1000_SUCCESS; + s32 ret_val; u16 phy_data, index; DEBUGFUNC("e1000_get_cable_length_80003es2lan"); if (!(hw->phy.ops.read_reg)) - goto out; + return E1000_SUCCESS; ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_DSP_DISTANCE, &phy_data); if (ret_val) - goto out; + return ret_val; index = phy_data & GG82563_DSPD_CABLE_LENGTH; - if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) { - ret_val = -E1000_ERR_PHY; - goto out; - } + if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) + return -E1000_ERR_PHY; phy->min_cable_length = e1000_gg82563_cable_length_table[index]; phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5]; phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; -out: - return ret_val; + return E1000_SUCCESS; } /** @@ -855,11 +820,11 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) { u32 ctrl; s32 ret_val; + u16 kum_reg_data; DEBUGFUNC("e1000_reset_hw_80003es2lan"); - /* - * Prevent the PCI-E bus from sticking if there is no TLP connection + /* Prevent the PCI-E bus from sticking if there is no TLP connection * on the last TLP read/write transaction when MAC is reset. 
*/ ret_val = e1000_disable_pcie_master_generic(hw); @@ -878,23 +843,30 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) ctrl = E1000_READ_REG(hw, E1000_CTRL); ret_val = e1000_acquire_phy_80003es2lan(hw); + if (ret_val) + return ret_val; + DEBUGOUT("Issuing a global reset to MAC\n"); E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); e1000_release_phy_80003es2lan(hw); + /* Disable IBIST slave mode (far-end loopback) */ + e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + &kum_reg_data); + kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; + e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + kum_reg_data); + ret_val = e1000_get_auto_rd_done_generic(hw); if (ret_val) /* We don't want to continue accessing MAC registers. */ - goto out; + return ret_val; /* Clear any pending interrupt events. */ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); E1000_READ_REG(hw, E1000_ICR); - ret_val = e1000_check_alt_mac_addr_generic(hw); - -out: - return ret_val; + return e1000_check_alt_mac_addr_generic(hw); } /** @@ -917,9 +889,9 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) /* Initialize identification LED */ ret_val = mac->ops.id_led_init(hw); + /* An error is not fatal and we should not stop init due to this */ if (ret_val) DEBUGOUT("Error initializing identification LED\n"); - /* This is not fatal and we should not stop init due to this */ /* Disabling VLAN filtering */ DEBUGOUT("Initializing the IEEE VLAN\n"); @@ -935,6 +907,8 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) /* Setup link and flow control */ ret_val = mac->ops.setup_link(hw); + if (ret_val) + return ret_val; /* Disable IBIST slave mode (far-end loopback) */ e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, @@ -945,14 +919,14 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) /* Set the transmit descriptor write-back policy */ reg_data = E1000_READ_REG(hw, E1000_TXDCTL(0)); - reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | - E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg_data); /* ...for both queues. */ reg_data = E1000_READ_REG(hw, E1000_TXDCTL(1)); - reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | - E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg_data); /* Enable retransmit on late collisions */ @@ -979,18 +953,16 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) /* default to TRUE to enable the MDIC W/A */ hw->dev_spec._80003es2lan.mdic_wa_enable = TRUE; - ret_val = e1000_read_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET >> - E1000_KMRNCTRLSTA_OFFSET_SHIFT, - &i); + ret_val = + e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_OFFSET >> + E1000_KMRNCTRLSTA_OFFSET_SHIFT, &i); if (!ret_val) { if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) == E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO) hw->dev_spec._80003es2lan.mdic_wa_enable = FALSE; } - /* - * Clear all of the statistics registers (clear on read). It is + /* Clear all of the statistics registers (clear on read). It is * important that we do this after we have tried to establish link * because the symbol error count will increment wildly if there * is no link. 
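The dominant cleanup in the e1000_80003es2lan.c hunks above and below is the removal of the shared "out:" exit label: instead of pre-initializing ret_val to E1000_SUCCESS and jumping to a single return statement, each step now returns its error code as soon as it occurs. A minimal before/after sketch of that pattern, using hypothetical step_one()/step_two() helpers rather than code from this change, looks like this:

/* Hypothetical helpers, assumed to exist only for this illustration. */
static s32 step_one(struct e1000_hw *hw);
static s32 step_two(struct e1000_hw *hw);

/* Before: one exit point reached via goto. */
static s32
setup_old_style(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;

	ret_val = step_one(hw);
	if (ret_val)
		goto out;

	ret_val = step_two(hw);
	if (ret_val)
		goto out;

out:
	return ret_val;
}

/* After: fail fast; ret_val no longer needs a default value. */
static s32
setup_new_style(struct e1000_hw *hw)
{
	s32 ret_val;

	ret_val = step_one(hw);
	if (ret_val)
		return ret_val;

	ret_val = step_two(hw);
	if (ret_val)
		return ret_val;

	return E1000_SUCCESS;
}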
@@ -1037,6 +1009,13 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw) reg |= (1 << 28); E1000_WRITE_REG(hw, E1000_TARC(1), reg); + /* Disable IPv6 extension header parsing because some malformed + * IPv6 headers can hang the Rx. + */ + reg = E1000_READ_REG(hw, E1000_RFCTL); + reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); + E1000_WRITE_REG(hw, E1000_RFCTL, reg); + return; } @@ -1050,14 +1029,14 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; - u32 ctrl_ext; + u32 reg; u16 data; DEBUGFUNC("e1000_copper_link_setup_gg82563_80003es2lan"); ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, &data); if (ret_val) - goto out; + return ret_val; data |= GG82563_MSCR_ASSERT_CRS_ON_TX; /* Use 25MHz for both link down and 1000Base-T for Tx clock. */ @@ -1065,10 +1044,9 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, data); if (ret_val) - goto out; + return ret_val; - /* - * Options: + /* Options: * MDI/MDI-X = 0 (default) * 0 - Auto for all speeds * 1 - MDI mode @@ -1077,7 +1055,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) */ ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_SPEC_CTRL, &data); if (ret_val) - goto out; + return ret_val; data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK; @@ -1094,8 +1072,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) break; } - /* - * Options: + /* Options: * disable_polarity_correction = 0 (default) * Automatic Correction for Reversed Cable Polarity * 0 - Disabled @@ -1107,90 +1084,86 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_SPEC_CTRL, data); if (ret_val) - goto out; + return ret_val; /* SW Reset the PHY so all changes take effect */ ret_val = hw->phy.ops.commit(hw); if (ret_val) { DEBUGOUT("Error Resetting the PHY\n"); - goto out; + return ret_val; } /* Bypass Rx and Tx FIFO's */ - ret_val = e1000_write_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL, - E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS | - E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS); + reg = E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL; + data = (E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS | + E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS); + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data); if (ret_val) - goto out; + return ret_val; - ret_val = e1000_read_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE, &data); + reg = E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE; + ret_val = e1000_read_kmrn_reg_80003es2lan(hw, reg, &data); if (ret_val) - goto out; + return ret_val; data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE; - ret_val = e1000_write_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE, data); + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data); if (ret_val) - goto out; + return ret_val; ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_SPEC_CTRL_2, &data); if (ret_val) - goto out; + return ret_val; data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG; ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_SPEC_CTRL_2, data); if (ret_val) - goto out; + return ret_val; - ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); - ctrl_ext &= ~(E1000_CTRL_EXT_LINK_MODE_MASK); - E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg &= ~E1000_CTRL_EXT_LINK_MODE_MASK; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); 
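The Kumeran (KMRN) accesses reworked just above (the FIFO bypass and MAC-to-PHY opmode writes) all funnel through e1000_read_kmrn_reg_80003es2lan() and e1000_write_kmrn_reg_80003es2lan(), which pack the target offset and payload into the single E1000_KMRNCTRLSTA register. A minimal sketch of the read side follows, assuming the E1000_KMRNCTRLSTA* macros and register accessors from the e1000 shared headers and omitting the MAC CSR locking and error handling the real helper performs:

/*
 * Sketch only: indirect Kumeran read, approximating what
 * e1000_read_kmrn_reg_80003es2lan() does once it holds the MAC CSR
 * semaphore.  The offset is shifted into the control/status word and
 * the REN bit requests a read-back.
 */
static u16
kmrn_read_sketch(struct e1000_hw *hw, u32 offset)
{
	u32 kmrnctrlsta;

	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
	    E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
	E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
	E1000_WRITE_FLUSH(hw);

	usec_delay(2);		/* let the Kumeran interface respond */

	/* the low 16 bits now hold the requested register value */
	return (u16)E1000_READ_REG(hw, E1000_KMRNCTRLSTA);
}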
ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_PWR_MGMT_CTRL, &data); if (ret_val) - goto out; + return ret_val; - /* - * Do not init these registers when the HW is in IAMT mode, since the + /* Do not init these registers when the HW is in IAMT mode, since the * firmware will have already initialized them. We only initialize * them if the HW is not in IAMT mode. */ - if (!(hw->mac.ops.check_mng_mode(hw))) { + if (!hw->mac.ops.check_mng_mode(hw)) { /* Enable Electrical Idle on the PHY */ data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE; ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_PWR_MGMT_CTRL, data); if (ret_val) - goto out; + return ret_val; ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &data); if (ret_val) - goto out; + return ret_val; data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, data); if (ret_val) - goto out; + return ret_val; } - /* - * Workaround: Disable padding in Kumeran interface in the MAC + /* Workaround: Disable padding in Kumeran interface in the MAC * and in the PHY to avoid CRC errors. */ ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_INBAND_CTRL, &data); if (ret_val) - goto out; + return ret_val; data |= GG82563_ICR_DIS_PADDING; ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_INBAND_CTRL, data); if (ret_val) - goto out; + return ret_val; -out: - return ret_val; + return E1000_SUCCESS; } /** @@ -1213,42 +1186,42 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw) ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); E1000_WRITE_REG(hw, E1000_CTRL, ctrl); - /* - * Set the mac to wait the maximum time between each + /* Set the mac to wait the maximum time between each * iteration and increase the max iterations when * polling the phy; this fixes erroneous timeouts at 10Mbps. 
*/ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4), 0xFFFF); if (ret_val) - goto out; + return ret_val; ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9), ®_data); if (ret_val) - goto out; + return ret_val; reg_data |= 0x3F; ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9), reg_data); if (ret_val) - goto out; - ret_val = e1000_read_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, ®_data); + return ret_val; + ret_val = + e1000_read_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, + ®_data); if (ret_val) - goto out; + return ret_val; reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING; - ret_val = e1000_write_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, reg_data); + ret_val = + e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, + reg_data); if (ret_val) - goto out; + return ret_val; ret_val = e1000_copper_link_setup_gg82563_80003es2lan(hw); if (ret_val) - goto out; - - ret_val = e1000_setup_copper_link_generic(hw); + return ret_val; -out: - return ret_val; + return e1000_setup_copper_link_generic(hw); } /** @@ -1271,7 +1244,7 @@ static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw) ret_val = e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex); if (ret_val) - goto out; + return ret_val; if (speed == SPEED_1000) ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw); @@ -1279,7 +1252,6 @@ static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw) ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw, duplex); } -out: return ret_val; } @@ -1293,7 +1265,7 @@ out: **/ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex) { - s32 ret_val = E1000_SUCCESS; + s32 ret_val; u32 tipg; u32 i = 0; u16 reg_data, reg_data2; @@ -1301,11 +1273,12 @@ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex) DEBUGFUNC("e1000_configure_kmrn_for_10_100"); reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT; - ret_val = e1000_write_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, - reg_data); + ret_val = + e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, + reg_data); if (ret_val) - goto out; + return ret_val; /* Configure Transmit Inter-Packet Gap */ tipg = E1000_READ_REG(hw, E1000_TIPG); @@ -1317,12 +1290,12 @@ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex) ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, ®_data); if (ret_val) - goto out; + return ret_val; ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, ®_data2); if (ret_val) - goto out; + return ret_val; i++; } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY)); @@ -1331,11 +1304,7 @@ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex) else reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; - ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, - reg_data); - -out: - return ret_val; + return hw->phy.ops.write_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); } /** @@ -1347,7 +1316,7 @@ out: **/ static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw) { - s32 ret_val = E1000_SUCCESS; + s32 ret_val; u16 reg_data, reg_data2; u32 tipg; u32 i = 0; @@ -1355,10 +1324,12 @@ static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw) DEBUGFUNC("e1000_configure_kmrn_for_1000"); reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT; - ret_val = e1000_write_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, reg_data); + ret_val = + 
e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, + reg_data); if (ret_val) - goto out; + return ret_val; /* Configure Transmit Inter-Packet Gap */ tipg = E1000_READ_REG(hw, E1000_TIPG); @@ -1370,21 +1341,18 @@ static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw) ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, ®_data); if (ret_val) - goto out; + return ret_val; ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, ®_data2); if (ret_val) - goto out; + return ret_val; i++; } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY)); reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; - ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, - reg_data); -out: - return ret_val; + return hw->phy.ops.write_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); } /** @@ -1401,13 +1369,13 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, u16 *data) { u32 kmrnctrlsta; - s32 ret_val = E1000_SUCCESS; + s32 ret_val; DEBUGFUNC("e1000_read_kmrn_reg_80003es2lan"); ret_val = e1000_acquire_mac_csr_80003es2lan(hw); if (ret_val) - goto out; + return ret_val; kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; @@ -1421,7 +1389,6 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, e1000_release_mac_csr_80003es2lan(hw); -out: return ret_val; } @@ -1439,13 +1406,13 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, u16 data) { u32 kmrnctrlsta; - s32 ret_val = E1000_SUCCESS; + s32 ret_val; DEBUGFUNC("e1000_write_kmrn_reg_80003es2lan"); ret_val = e1000_acquire_mac_csr_80003es2lan(hw); if (ret_val) - goto out; + return ret_val; kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & E1000_KMRNCTRLSTA_OFFSET) | data; @@ -1456,7 +1423,6 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, e1000_release_mac_csr_80003es2lan(hw); -out: return ret_val; } @@ -1466,23 +1432,19 @@ out: **/ static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw) { - s32 ret_val = E1000_SUCCESS; + s32 ret_val; DEBUGFUNC("e1000_read_mac_addr_80003es2lan"); - /* - * If there's an alternate MAC address place it in RAR0 + /* If there's an alternate MAC address place it in RAR0 * so that it will override the Si installed default perm * address. */ ret_val = e1000_check_alt_mac_addr_generic(hw); if (ret_val) - goto out; - - ret_val = e1000_read_mac_addr_generic(hw); + return ret_val; -out: - return ret_val; + return e1000_read_mac_addr_generic(hw); } /** diff --git a/usr/src/uts/common/io/e1000api/e1000_80003es2lan.h b/usr/src/uts/common/io/e1000api/e1000_80003es2lan.h index 38d4cc0f38..3807e46305 100644 --- a/usr/src/uts/common/io/e1000api/e1000_80003es2lan.h +++ b/usr/src/uts/common/io/e1000api/e1000_80003es2lan.h @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2011, Intel Corporation + Copyright (c) 2001-2013, Intel Corporation All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -35,10 +35,6 @@ #ifndef _E1000_80003ES2LAN_H_ #define _E1000_80003ES2LAN_H_ -#ifdef __cplusplus -extern "C" { -#endif - #define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00 #define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02 #define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10 @@ -55,34 +51,32 @@ extern "C" { #define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C #define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004 -#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ +#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gig Carry Extend Padding */ #define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000 #define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8 #define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9 /* GG82563 PHY Specific Status Register (Page 0, Register 16 */ -#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Disabled */ +#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Dis */ #define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060 #define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */ #define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */ #define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */ /* PHY Specific Control Register 2 (Page 0, Register 26) */ -#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 /* 1=Reverse Auto-Nego */ +#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 /* 1=Reverse Auto-Neg */ /* MAC Specific Control Register (Page 2, Register 21) */ /* Tx clock speed for Link Down and 1000BASE-T for the following speeds */ #define GG82563_MSCR_TX_CLK_MASK 0x0007 #define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004 #define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005 -#define GG82563_MSCR_TX_CLK_1000MBPS_2_5 0x0006 #define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007 #define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */ -/* DSP Distance Register (Page 5, Register 26) */ -/* +/* DSP Distance Register (Page 5, Register 26) * 0 = <50M * 1 = 50-80M * 2 = 80-100M @@ -104,8 +98,4 @@ extern "C" { /* In-Band Control Register (Page 194, Register 18) */ #define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */ -#ifdef __cplusplus -} #endif - -#endif /* _E1000_80003ES2LAN_H_ */ diff --git a/usr/src/uts/common/io/e1000api/e1000_82542.c b/usr/src/uts/common/io/e1000api/e1000_82542.c index c0b4e88be9..19d5402831 100644 --- a/usr/src/uts/common/io/e1000api/e1000_82542.c +++ b/usr/src/uts/common/io/e1000api/e1000_82542.c @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2010, Intel Corporation + Copyright (c) 2001-2014, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -47,7 +47,7 @@ static s32 e1000_init_hw_82542(struct e1000_hw *hw); static s32 e1000_setup_link_82542(struct e1000_hw *hw); static s32 e1000_led_on_82542(struct e1000_hw *hw); static s32 e1000_led_off_82542(struct e1000_hw *hw); -static void e1000_rar_set_82542(struct e1000_hw *hw, u8 *addr, u32 index); +static int e1000_rar_set_82542(struct e1000_hw *hw, u8 *addr, u32 index); static void e1000_clear_hw_cntrs_82542(struct e1000_hw *hw); static s32 e1000_read_mac_addr_82542(struct e1000_hw *hw); @@ -409,7 +409,7 @@ static s32 e1000_led_off_82542(struct e1000_hw *hw) * Sets the receive address array register at index to the address passed * in by addr. 
**/ -static void e1000_rar_set_82542(struct e1000_hw *hw, u8 *addr, u32 index) +static int e1000_rar_set_82542(struct e1000_hw *hw, u8 *addr, u32 index) { u32 rar_low, rar_high; @@ -431,6 +431,7 @@ static void e1000_rar_set_82542(struct e1000_hw *hw, u8 *addr, u32 index) E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low); E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high); + return E1000_SUCCESS; } /** diff --git a/usr/src/uts/common/io/e1000api/e1000_82543.h b/usr/src/uts/common/io/e1000api/e1000_82543.h index 45a34e4d86..60e5c15dda 100644 --- a/usr/src/uts/common/io/e1000api/e1000_82543.h +++ b/usr/src/uts/common/io/e1000api/e1000_82543.h @@ -35,10 +35,6 @@ #ifndef _E1000_82543_H_ #define _E1000_82543_H_ -#ifdef __cplusplus -extern "C" { -#endif - #define PHY_PREAMBLE 0xFFFFFFFF #define PHY_PREAMBLE_SIZE 32 #define PHY_SOF 0x1 @@ -58,8 +54,4 @@ void e1000_set_tbi_compatibility_82543(struct e1000_hw *hw, bool state); bool e1000_tbi_sbp_enabled_82543(struct e1000_hw *hw); -#ifdef __cplusplus -} #endif - -#endif /* _E1000_82543_H_ */ diff --git a/usr/src/uts/common/io/e1000api/e1000_82571.c b/usr/src/uts/common/io/e1000api/e1000_82571.c index ca7a1d021f..e209d43826 100644 --- a/usr/src/uts/common/io/e1000api/e1000_82571.c +++ b/usr/src/uts/common/io/e1000api/e1000_82571.c @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2014, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -927,9 +927,9 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, } for (i = 0; i < words; i++) { - eewr = (data[i] << E1000_NVM_RW_REG_DATA) | - ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | - E1000_NVM_RW_REG_START; + eewr = ((data[i] << E1000_NVM_RW_REG_DATA) | + ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) | + E1000_NVM_RW_REG_START); ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); if (ret_val) @@ -1101,8 +1101,6 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) default: break; } - if (ret_val) - DEBUGOUT("Cannot acquire MDIO ownership\n"); ctrl = E1000_READ_REG(hw, E1000_CTRL); @@ -1111,9 +1109,16 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) /* Must release MDIO ownership and mutex after MAC reset. */ switch (hw->mac.type) { + case e1000_82573: + /* Release mutex only if the hw semaphore is acquired */ + if (!ret_val) + e1000_put_hw_semaphore_82573(hw); + break; case e1000_82574: case e1000_82583: - e1000_put_hw_semaphore_82574(hw); + /* Release mutex only if the hw semaphore is acquired */ + if (!ret_val) + e1000_put_hw_semaphore_82574(hw); break; default: break; @@ -1222,8 +1227,8 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw) /* Set the transmit descriptor write-back policy */ reg_data = E1000_READ_REG(hw, E1000_TXDCTL(0)); - reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | - E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg_data); /* ...for both queues. 
*/ @@ -1239,9 +1244,9 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw) break; default: reg_data = E1000_READ_REG(hw, E1000_TXDCTL(1)); - reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | - E1000_TXDCTL_FULL_TX_DESC_WB | - E1000_TXDCTL_COUNT_DESC; + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | + E1000_TXDCTL_COUNT_DESC); E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg_data); break; } @@ -1448,10 +1453,14 @@ static void e1000_clear_vfta_82571(struct e1000_hw *hw) static bool e1000_check_mng_mode_82574(struct e1000_hw *hw) { u16 data; + s32 ret_val; DEBUGFUNC("e1000_check_mng_mode_82574"); - hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &data); + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &data); + if (ret_val) + return FALSE; + return (data & E1000_NVM_INIT_CTRL2_MNGM) != 0; } diff --git a/usr/src/uts/common/io/e1000api/e1000_82571.h b/usr/src/uts/common/io/e1000api/e1000_82571.h index 1911048a7f..41d5df0e13 100644 --- a/usr/src/uts/common/io/e1000api/e1000_82571.h +++ b/usr/src/uts/common/io/e1000api/e1000_82571.h @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2010, Intel Corporation + Copyright (c) 2001-2014, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -35,39 +35,32 @@ #ifndef _E1000_82571_H_ #define _E1000_82571_H_ -#ifdef __cplusplus -extern "C" { -#endif - -#define ID_LED_RESERVED_F746 0xF746 -#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \ - (ID_LED_OFF1_ON2 << 8) | \ - (ID_LED_DEF1_DEF2 << 4) | \ - (ID_LED_DEF1_DEF2)) +#define ID_LED_RESERVED_F746 0xF746 +#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) -#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 -#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */ +#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 +#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */ /* Intr Throttling - RW */ -#define E1000_EITR_82574(_n) (0x000E8 + (0x4 * (_n))) +#define E1000_EITR_82574(_n) (0x000E8 + (0x4 * (_n))) -#define E1000_EIAC_82574 0x000DC /* Ext. Interrupt Auto Clear - RW */ -#define E1000_EIAC_MASK_82574 0x01F00000 +#define E1000_EIAC_82574 0x000DC /* Ext. 
Interrupt Auto Clear - RW */ +#define E1000_EIAC_MASK_82574 0x01F00000 -#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */ +#define E1000_IVAR_INT_ALLOC_VALID 0x8 -#define E1000_RXCFGL 0x0B634 /* TimeSync Rx EtherType & Msg Type Reg - RW */ +/* Manageability Operation Mode mask */ +#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 -#define E1000_BASE1000T_STATUS 10 -#define E1000_IDLE_ERROR_COUNT_MASK 0xFF -#define E1000_RECEIVE_ERROR_COUNTER 21 -#define E1000_RECEIVE_ERROR_MAX 0xFFFF +#define E1000_BASE1000T_STATUS 10 +#define E1000_IDLE_ERROR_COUNT_MASK 0xFF +#define E1000_RECEIVE_ERROR_COUNTER 21 +#define E1000_RECEIVE_ERROR_MAX 0xFFFF bool e1000_check_phy_82574(struct e1000_hw *hw); bool e1000_get_laa_state_82571(struct e1000_hw *hw); void e1000_set_laa_state_82571(struct e1000_hw *hw, bool state); -#ifdef __cplusplus -} #endif - -#endif /* _E1000_82571_H_ */ diff --git a/usr/src/uts/common/io/e1000api/e1000_82575.c b/usr/src/uts/common/io/e1000api/e1000_82575.c index e3cea80fc8..02f2590d4f 100644 --- a/usr/src/uts/common/io/e1000api/e1000_82575.c +++ b/usr/src/uts/common/io/e1000api/e1000_82575.c @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2014, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -52,10 +52,10 @@ static void e1000_release_phy_82575(struct e1000_hw *hw); static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw); static void e1000_release_nvm_82575(struct e1000_hw *hw); static s32 e1000_check_for_link_82575(struct e1000_hw *hw); +static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw); static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw); static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, u16 *duplex); -static s32 e1000_init_hw_82575(struct e1000_hw *hw); static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw); static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, u16 *data); @@ -119,7 +119,8 @@ static bool e1000_get_i2c_data(u32 *i2cctl); static const u16 e1000_82580_rxpbs_table[] = { 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 }; #define E1000_82580_RXPBS_TABLE_SIZE \ - (sizeof(e1000_82580_rxpbs_table)/sizeof(u16)) + (sizeof(e1000_82580_rxpbs_table) / \ + sizeof(e1000_82580_rxpbs_table[0])) /** @@ -237,18 +238,46 @@ static s32 e1000_init_phy_params_82575(struct e1000_hw *hw) phy->type = e1000_phy_m88; phy->ops.check_polarity = e1000_check_polarity_m88; phy->ops.get_info = e1000_get_phy_info_m88; - switch (phy->id) { - case I347AT4_E_PHY_ID: - case M88E1112_E_PHY_ID: - case M88E1340M_E_PHY_ID: - case M88E1543_E_PHY_ID: - case M88E1512_E_PHY_ID: + if (phy->id == I347AT4_E_PHY_ID || + phy->id == M88E1112_E_PHY_ID || + phy->id == M88E1340M_E_PHY_ID) phy->ops.get_cable_length = e1000_get_cable_length_m88_gen2; - default: + else if (phy->id == M88E1543_E_PHY_ID || + phy->id == M88E1512_E_PHY_ID) + phy->ops.get_cable_length = + e1000_get_cable_length_m88_gen2; + else phy->ops.get_cable_length = e1000_get_cable_length_m88; - } phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; + /* Check if this PHY is configured for media swap. 
*/ + if (phy->id == M88E1112_E_PHY_ID) { + u16 data; + + ret_val = phy->ops.write_reg(hw, + E1000_M88E1112_PAGE_ADDR, + 2); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, + E1000_M88E1112_MAC_CTRL_1, + &data); + if (ret_val) + goto out; + + data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >> + E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT; + if (data == E1000_M88E1112_AUTO_COPPER_SGMII || + data == E1000_M88E1112_AUTO_COPPER_BASEX) + hw->mac.ops.check_for_link = + e1000_check_for_link_media_swap; + } + if (phy->id == M88E1512_E_PHY_ID) { + ret_val = e1000_initialize_M88E1512_phy(hw); + if (ret_val) + goto out; + } break; case IGP03E1000_E_PHY_ID: case IGP04E1000_E_PHY_ID: @@ -426,6 +455,9 @@ static s32 e1000_init_mac_params_82575(struct e1000_hw *hw) else mac->ops.reset_hw = e1000_reset_hw_82575; /* hw initialization */ + if ((mac->type == e1000_i210) || (mac->type == e1000_i211)) + mac->ops.init_hw = e1000_init_hw_i210; + else mac->ops.init_hw = e1000_init_hw_82575; /* link setup */ mac->ops.setup_link = e1000_setup_link_generic; @@ -445,7 +477,7 @@ static s32 e1000_init_mac_params_82575(struct e1000_hw *hw) mac->ops.config_collision_dist = e1000_config_collision_dist_82575; /* multicast address update */ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; - if (mac->type == e1000_i350 || mac->type == e1000_i354) { + if (hw->mac.type == e1000_i350 || mac->type == e1000_i354) { /* writing VFTA */ mac->ops.write_vfta = e1000_write_vfta_i350; /* clearing VFTA */ @@ -726,6 +758,7 @@ out: static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; + struct e1000_phy_info *phy = &hw->phy; DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575"); @@ -748,7 +781,11 @@ static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) goto out; ret_val = hw->phy.ops.commit(hw); + if (ret_val) + goto out; + if (phy->id == M88E1512_E_PHY_ID) + ret_val = e1000_initialize_M88E1512_phy(hw); out: return ret_val; } @@ -855,7 +892,6 @@ out: static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) { struct e1000_phy_info *phy = &hw->phy; - s32 ret_val = E1000_SUCCESS; u32 data; DEBUGFUNC("e1000_set_d0_lplu_state_82580"); @@ -883,7 +919,7 @@ static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) } E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data); - return ret_val; + return E1000_SUCCESS; } /** @@ -903,7 +939,6 @@ static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) { struct e1000_phy_info *phy = &hw->phy; - s32 ret_val = E1000_SUCCESS; u32 data; DEBUGFUNC("e1000_set_d3_lplu_state_82580"); @@ -931,7 +966,7 @@ s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) } E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data); - return ret_val; + return E1000_SUCCESS; } /** @@ -945,7 +980,7 @@ s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) **/ static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw) { - s32 ret_val; + s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_acquire_nvm_82575"); @@ -967,6 +1002,7 @@ static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw) DEBUGOUT("Nvm bit banging access error detected and cleared.\n"); } } + if (hw->mac.type == e1000_82580) { u32 eecd = E1000_READ_REG(hw, E1000_EECD); if (eecd & E1000_EECD_BLOCKED) { @@ -977,7 +1013,6 @@ static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw) } } - ret_val = e1000_acquire_nvm_generic(hw); if (ret_val) e1000_release_swfw_sync_82575(hw, 
E1000_SWFW_EEP_SM); @@ -1091,7 +1126,6 @@ static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask) static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw) { s32 timeout = PHY_CFG_TIMEOUT; - s32 ret_val = E1000_SUCCESS; u32 mask = E1000_NVM_CFG_DONE_PORT_0; DEBUGFUNC("e1000_get_cfg_done_82575"); @@ -1116,7 +1150,7 @@ static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw) (hw->phy.type == e1000_phy_igp_3)) e1000_phy_init_script_igp3(hw); - return ret_val; + return E1000_SUCCESS; } /** @@ -1187,6 +1221,61 @@ static s32 e1000_check_for_link_82575(struct e1000_hw *hw) } /** + * e1000_check_for_link_media_swap - Check which M88E1112 interface linked + * @hw: pointer to the HW structure + * + * Poll the M88E1112 interfaces to see which interface achieved link. + */ +static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + u8 port = 0; + + DEBUGFUNC("e1000_check_for_link_media_swap"); + + /* Check the copper medium. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); + if (ret_val) + return ret_val; + + if (data & E1000_M88E1112_STATUS_LINK) + port = E1000_MEDIA_PORT_COPPER; + + /* Check the other medium. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); + if (ret_val) + return ret_val; + + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + + if (data & E1000_M88E1112_STATUS_LINK) + port = E1000_MEDIA_PORT_OTHER; + + /* Determine if a swap needs to happen. */ + if (port && (hw->dev_spec._82575.media_port != port)) { + hw->dev_spec._82575.media_port = port; + hw->dev_spec._82575.media_changed = TRUE; + } else { + ret_val = e1000_check_for_link_82575(hw); + } + + return E1000_SUCCESS; +} + +/** * e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown * @hw: pointer to the HW structure **/ @@ -1387,7 +1476,7 @@ static s32 e1000_reset_hw_82575(struct e1000_hw *hw) * * This inits the hardware readying it for operation. 
**/ -static s32 e1000_init_hw_82575(struct e1000_hw *hw) +s32 e1000_init_hw_82575(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; s32 ret_val; @@ -1492,6 +1581,8 @@ static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw) case I347AT4_E_PHY_ID: case M88E1112_E_PHY_ID: case M88E1340M_E_PHY_ID: + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: case I210_I_PHY_ID: ret_val = e1000_copper_link_setup_m88_gen2(hw); break; @@ -1904,7 +1995,7 @@ static s32 e1000_reset_init_script_82575(struct e1000_hw *hw) **/ static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw) { - s32 ret_val = E1000_SUCCESS; + s32 ret_val; DEBUGFUNC("e1000_read_mac_addr_82575"); @@ -2179,17 +2270,18 @@ void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) default: return; } + reg_val = E1000_READ_REG(hw, reg_offset); if (enable) { reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK | - E1000_DTXSWC_VLAN_SPOOF_MASK); + E1000_DTXSWC_VLAN_SPOOF_MASK); /* The PF can spoof - it has to in order to * support emulation mode NICs */ reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS)); } else { reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | - E1000_DTXSWC_VLAN_SPOOF_MASK); + E1000_DTXSWC_VLAN_SPOOF_MASK); } E1000_WRITE_REG(hw, reg_offset, reg_val); } @@ -2396,11 +2488,17 @@ static s32 e1000_reset_hw_82580(struct e1000_hw *hw) ctrl |= E1000_CTRL_RST; E1000_WRITE_REG(hw, E1000_CTRL, ctrl); - E1000_WRITE_FLUSH(hw); - /* Add delay to insure DEV_RST has time to complete */ - if (global_device_reset) - msec_delay(5); + switch (hw->device_id) { + case E1000_DEV_ID_DH89XXCC_SGMII: + break; + default: + E1000_WRITE_FLUSH(hw); + break; + } + + /* Add delay to insure DEV_RST or RST has time to complete */ + msec_delay(5); ret_val = e1000_get_auto_rd_done_generic(hw); if (ret_val) { @@ -2412,10 +2510,6 @@ static s32 e1000_reset_hw_82580(struct e1000_hw *hw) DEBUGOUT("Auto Read Done did not complete\n"); } - /* If EEPROM is not present, run manual init scripts */ - if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES)) - e1000_reset_init_script_82575(hw); - /* clear global device reset status bit */ E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET); @@ -2539,7 +2633,7 @@ out: **/ static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw) { - s32 ret_val = E1000_SUCCESS; + s32 ret_val; u16 eeprom_regions_count = 1; u16 j, nvm_data; u16 nvm_offset; @@ -2670,6 +2764,134 @@ out: } /** + * __e1000_access_emi_reg - Read/write EMI register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: pointer to value to read/write from/to the EMI address + * @read: boolean flag to indicate read or write + **/ +static s32 __e1000_access_emi_reg(struct e1000_hw *hw, u16 address, + u16 *data, bool read) +{ + s32 ret_val; + + DEBUGFUNC("__e1000_access_emi_reg"); + + ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data); + else + ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data); + + return ret_val; +} + +/** + * e1000_read_emi_reg - Read Extended Management Interface register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: value to be read from the EMI address + **/ +s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data) +{ + DEBUGFUNC("e1000_read_emi_reg"); + + return __e1000_access_emi_reg(hw, addr, data, TRUE); +} + +/** + * e1000_initialize_M88E1512_phy - Initialize M88E1512 PHY + * @hw: pointer to the HW structure + * + * 
Initialize Marvell 1512 to work correctly with Avoton. + **/ +s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_initialize_M88E1512_phy"); + + /* Check if this is correct PHY. */ + if (phy->id != M88E1512_E_PHY_ID) + goto out; + + /* Switch to PHY page 0xFF. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); + if (ret_val) + goto out; + + /* Switch to PHY page 0xFB. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D); + if (ret_val) + goto out; + + /* Switch to PHY page 0x12. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); + if (ret_val) + goto out; + + /* Change mode to SGMII-to-Copper */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); + if (ret_val) + goto out; + + /* Return the PHY to page 0. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + + msec_delay(1000); +out: + return ret_val; +} + +/** * e1000_set_eee_i350 - Enable/disable EEE support * @hw: pointer to the HW structure * * @@ -2678,7 +2900,6 @@ out: **/ s32 e1000_set_eee_i350(struct e1000_hw *hw) { - s32 ret_val = E1000_SUCCESS; u32 ipcnfg, eeer; DEBUGFUNC("e1000_set_eee_i350"); @@ -2711,7 +2932,7 @@ s32 e1000_set_eee_i350(struct e1000_hw *hw) E1000_READ_REG(hw, E1000_EEER); out: - return ret_val; + return E1000_SUCCESS; } /** @@ -2761,7 +2982,7 @@ s32 e1000_set_eee_i354(struct e1000_hw *hw) E1000_EEE_ADV_DEV_I354, &phy_data); if (ret_val) - goto out; + goto out; phy_data |= E1000_EEE_ADV_100_SUPPORTED | E1000_EEE_ADV_1000_SUPPORTED; @@ -2806,7 +3027,7 @@ s32 e1000_get_eee_status_i354(struct e1000_hw *hw, bool *status) /* Check if EEE is supported on this device. 
*/ if ((hw->phy.media_type != e1000_media_type_copper) || ((phy->id != M88E1543_E_PHY_ID) && - (phy->id != M88E1512_E_PHY_ID))) + (phy->id != M88E1512_E_PHY_ID))) goto out; ret_val = e1000_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354, @@ -3419,4 +3640,3 @@ void e1000_i2c_bus_clear(struct e1000_hw *hw) e1000_i2c_stop(hw); } - diff --git a/usr/src/uts/common/io/e1000api/e1000_82575.h b/usr/src/uts/common/io/e1000api/e1000_82575.h index a6fe636d3a..bc550c06a4 100644 --- a/usr/src/uts/common/io/e1000api/e1000_82575.h +++ b/usr/src/uts/common/io/e1000api/e1000_82575.h @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2014, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -249,6 +249,8 @@ union e1000_adv_rx_desc { #define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 /* RSS Packet Types as indicated in the receive descriptor */ +#define E1000_RXDADV_PKTTYPE_ILMASK 0x000000F0 +#define E1000_RXDADV_PKTTYPE_TLMASK 0x00000F00 #define E1000_RXDADV_PKTTYPE_NONE 0x00000000 #define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */ #define E1000_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPV4 hdr + extensions */ @@ -386,7 +388,7 @@ struct e1000_adv_tx_context_desc { #define E1000_ETQF_FILTER_ENABLE (1 << 26) #define E1000_ETQF_IMM_INT (1 << 29) #define E1000_ETQF_1588 (1 << 30) -#define E1000_ETQF_QUEUE_ENABLE (1 << 31) +#define E1000_ETQF_QUEUE_ENABLE (1UL << 31) /* * ETQF filter list: one static filter per filter consumer. This is * to avoid filter collisions later. Add new filters @@ -482,6 +484,7 @@ void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable); void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf); void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable); s32 e1000_init_nvm_params_82575(struct e1000_hw *hw); +s32 e1000_init_hw_82575(struct e1000_hw *hw); enum e1000_promisc_type { e1000_promisc_disabled = 0, /* all promisc modes disabled */ @@ -495,9 +498,11 @@ void e1000_vfta_set_vf(struct e1000_hw *, u16, bool); void e1000_rlpml_set_vf(struct e1000_hw *, u16); s32 e1000_promisc_set_vf(struct e1000_hw *, enum e1000_promisc_type type); u16 e1000_rxpbs_adjust_82580(u32 data); +s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data); s32 e1000_set_eee_i350(struct e1000_hw *); s32 e1000_set_eee_i354(struct e1000_hw *); s32 e1000_get_eee_status_i354(struct e1000_hw *, bool *); +s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw); /* I2C SDA and SCL timing parameters for standard mode */ #define E1000_I2C_T_HD_STA 4 diff --git a/usr/src/uts/common/io/e1000api/e1000_api.c b/usr/src/uts/common/io/e1000api/e1000_api.c index cae5267c56..374ffa678e 100644 --- a/usr/src/uts/common/io/e1000api/e1000_api.c +++ b/usr/src/uts/common/io/e1000api/e1000_api.c @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2014, Intel Corporation All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -293,6 +293,10 @@ s32 e1000_set_mac_type(struct e1000_hw *hw) case E1000_DEV_ID_PCH_LPT_I217_V: case E1000_DEV_ID_PCH_LPTLP_I218_LM: case E1000_DEV_ID_PCH_LPTLP_I218_V: + case E1000_DEV_ID_PCH_I218_LM2: + case E1000_DEV_ID_PCH_I218_V2: + case E1000_DEV_ID_PCH_I218_LM3: + case E1000_DEV_ID_PCH_I218_V3: mac->type = e1000_pch_lpt; break; case E1000_DEV_ID_82575EB_COPPER: @@ -329,9 +333,8 @@ s32 e1000_set_mac_type(struct e1000_hw *hw) case E1000_DEV_ID_I350_DA4: mac->type = e1000_i350; break; -#if defined(QV_RELEASE) && defined(SPRINGVILLE_FLASHLESS_HW) - case E1000_DEV_ID_I210_NVMLESS: -#endif /* QV_RELEASE && SPRINGVILLE_FLASHLESS_HW */ + case E1000_DEV_ID_I210_COPPER_FLASHLESS: + case E1000_DEV_ID_I210_SERDES_FLASHLESS: case E1000_DEV_ID_I210_COPPER: case E1000_DEV_ID_I210_COPPER_OEM1: case E1000_DEV_ID_I210_COPPER_IT: @@ -351,12 +354,12 @@ s32 e1000_set_mac_type(struct e1000_hw *hw) case E1000_DEV_ID_I350_VF_HV: mac->type = e1000_vfadapt_i350; break; + case E1000_DEV_ID_I354_BACKPLANE_1GBPS: case E1000_DEV_ID_I354_SGMII: case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS: mac->type = e1000_i354; break; - default: /* Should never have loaded on this device */ ret_val = -E1000_ERR_MAC_INIT; @@ -829,10 +832,12 @@ void e1000_config_collision_dist(struct e1000_hw *hw) * * Sets a Receive Address Register (RAR) to the specified address. **/ -void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +int e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) { if (hw->mac.ops.rar_set) - hw->mac.ops.rar_set(hw, addr, index); + return hw->mac.ops.rar_set(hw, addr, index); + + return E1000_SUCCESS; } /** diff --git a/usr/src/uts/common/io/e1000api/e1000_api.h b/usr/src/uts/common/io/e1000api/e1000_api.h index 0898b811b7..a2ffa16936 100644 --- a/usr/src/uts/common/io/e1000api/e1000_api.h +++ b/usr/src/uts/common/io/e1000api/e1000_api.h @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2014, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -35,10 +35,6 @@ #ifndef _E1000_API_H_ #define _E1000_API_H_ -#ifdef __cplusplus -extern "C" { -#endif - #include "e1000_hw.h" extern void e1000_init_function_pointers_82542(struct e1000_hw *hw); @@ -73,7 +69,7 @@ s32 e1000_setup_link(struct e1000_hw *hw); s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex); s32 e1000_disable_pcie_master(struct e1000_hw *hw); void e1000_config_collision_dist(struct e1000_hw *hw); -void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); +int e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr); void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, u32 mc_addr_count); @@ -168,9 +164,4 @@ u32 e1000_translate_register_82542(u32 reg); #define E1000_MAX(a, b) ((a) > (b) ? 
(a) : (b)) #define E1000_DIVIDE_ROUND_UP(a, b) (((a) + (b) - 1) / (b)) /* ceil(a/b) */ - -#ifdef __cplusplus -} -#endif - #endif /* _E1000_API_H_ */ diff --git a/usr/src/uts/common/io/e1000api/e1000_defines.h b/usr/src/uts/common/io/e1000api/e1000_defines.h index 05573c3bc7..b0faf8c0d2 100644 --- a/usr/src/uts/common/io/e1000api/e1000_defines.h +++ b/usr/src/uts/common/io/e1000api/e1000_defines.h @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2014, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -43,6 +43,8 @@ /* Wake Up Control */ #define E1000_WUC_APME 0x00000001 /* APM Enable */ #define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */ +#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ #define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */ /* Wake Up Filter Control */ @@ -75,6 +77,7 @@ #define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ /* Physical Func Reset Done Indication */ #define E1000_CTRL_EXT_PFRSTD 0x00004000 +#define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */ #define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ #define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ #define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clk Gating */ @@ -155,6 +158,7 @@ E1000_RXDEXT_STATERR_CXE | \ E1000_RXDEXT_STATERR_RXE) +#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001 #define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 #define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 #define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 @@ -287,10 +291,10 @@ #define E1000_CONNSW_ENRGSRC 0x4 #define E1000_CONNSW_PHYSD 0x400 -#define E1000_CONNSW_PHY_PDN 0x800 +#define E1000_CONNSW_PHY_PDN 0x800 #define E1000_CONNSW_SERDESD 0x200 -#define E1000_CONNSW_AUTOSENSE_CONF 0x2 -#define E1000_CONNSW_AUTOSENSE_EN 0x1 +#define E1000_CONNSW_AUTOSENSE_CONF 0x2 +#define E1000_CONNSW_AUTOSENSE_EN 0x1 #define E1000_PCS_CFG_PCS_EN 8 #define E1000_PCS_LCTL_FLV_LINK_UP 1 #define E1000_PCS_LCTL_FSV_10 0 @@ -328,8 +332,8 @@ #define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */ #define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */ #define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ -#define E1000_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */ -#define E1000_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */ +#define E1000_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */ +#define E1000_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */ #define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ #define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ @@ -341,7 +345,7 @@ #define SPEED_10 10 #define SPEED_100 100 #define SPEED_1000 1000 -#define SPEED_2500 2500 +#define SPEED_2500 2500 #define HALF_DUPLEX 1 #define FULL_DUPLEX 2 @@ -462,6 +466,7 @@ #define ETHERNET_FCS_SIZE 4 #define MAX_JUMBO_FRAME_SIZE 0x3F00 +#define E1000_TX_PTR_GAP 0x1F /* Extended Configuration Control and Size */ #define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 @@ -656,7 +661,7 @@ #define E1000_EITR_ITR_INT_MASK 0x0000FFFF /* E1000_EITR_CNT_IGNR is only for 82576 and newer */ #define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ -#define E1000_EITR_INTERVAL 0x00007FFC +#define E1000_EITR_INTERVAL 0x00007FFC /* Transmit Descriptor Control */ 
#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ @@ -812,6 +817,17 @@ #define E1000_MDICNFG_PHY_MASK 0x03E00000 #define E1000_MDICNFG_PHY_SHIFT 21 +#define E1000_MEDIA_PORT_COPPER 1 +#define E1000_MEDIA_PORT_OTHER 2 +#define E1000_M88E1112_AUTO_COPPER_SGMII 0x2 +#define E1000_M88E1112_AUTO_COPPER_BASEX 0x3 +#define E1000_M88E1112_STATUS_LINK 0x0004 /* Interface Link Bit */ +#define E1000_M88E1112_MAC_CTRL_1 0x10 +#define E1000_M88E1112_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */ +#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7 +#define E1000_M88E1112_PAGE_ADDR 0x16 +#define E1000_M88E1112_STATUS 0x01 + #define E1000_THSTAT_LOW_EVENT 0x20000000 /* Low thermal threshold */ #define E1000_THSTAT_MID_EVENT 0x00200000 /* Mid thermal threshold */ #define E1000_THSTAT_HIGH_EVENT 0x00002000 /* High thermal threshold */ @@ -828,20 +844,25 @@ #define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ #define E1000_EEER_RX_LPI_STATUS 0x40000000 /* Rx in LPI state */ #define E1000_EEER_TX_LPI_STATUS 0x80000000 /* Tx in LPI state */ -#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */ -#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */ -#define E1000_M88E1543_EEE_CTRL_1 0x0 -#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ -#define E1000_EEE_ADV_DEV_I354 7 -#define E1000_EEE_ADV_ADDR_I354 60 -#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */ -#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */ -#define E1000_PCS_STATUS_DEV_I354 3 -#define E1000_PCS_STATUS_ADDR_I354 1 -#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400 -#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800 - +#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */ +#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */ +#define E1000_M88E1543_EEE_CTRL_1 0x0 +#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ +#define E1000_EEE_ADV_DEV_I354 7 +#define E1000_EEE_ADV_ADDR_I354 60 +#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */ +#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */ +#define E1000_PCS_STATUS_DEV_I354 3 +#define E1000_PCS_STATUS_ADDR_I354 1 +#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400 +#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800 +#define E1000_M88E1512_CFG_REG_1 0x0010 +#define E1000_M88E1512_CFG_REG_2 0x0011 +#define E1000_M88E1512_CFG_REG_3 0x0007 +#define E1000_M88E1512_MODE 0x0014 #define E1000_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */ +#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */ +#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */ /* PCI Express Control */ #define E1000_GCR_RXD_NO_SNOOP 0x00000001 #define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 @@ -854,7 +875,6 @@ #define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000 #define E1000_GCR_CAP_VER2 0x00040000 - #define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ E1000_GCR_RXDSCW_NO_SNOOP | \ E1000_GCR_RXDSCR_NO_SNOOP | \ @@ -862,7 +882,7 @@ E1000_GCR_TXDSCW_NO_SNOOP | \ E1000_GCR_TXDSCR_NO_SNOOP) -#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ +#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ /* mPHY address control and data registers */ #define E1000_MPHY_ADDR_CTL 0x0024 /* Address Control Reg */ @@ -1197,8 +1217,8 @@ #define M88E1011_I_PHY_ID 0x01410C20 #define IGP01E1000_I_PHY_ID 0x02A80380 #define M88E1111_I_PHY_ID 0x01410CC0 -#define M88E1543_E_PHY_ID 0x01410EA0 -#define M88E1512_E_PHY_ID 0x01410DD0 
+#define M88E1543_E_PHY_ID 0x01410EA0 +#define M88E1512_E_PHY_ID 0x01410DD0 #define M88E1112_E_PHY_ID 0x01410C90 #define I347AT4_E_PHY_ID 0x01410DC0 #define M88E1340M_E_PHY_ID 0x01410DF0 @@ -1421,6 +1441,9 @@ #define E1000_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */ #define E1000_RXPBS_SIZE_I210_MASK 0x0000003F /* Rx packet buffer size */ #define E1000_TXPB0S_SIZE_I210_MASK 0x0000003F /* Tx packet buffer 0 size */ +#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ +#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ + #define E1000_DOBFFCTL_OBFFTHR_MASK 0x000000FF /* OBFF threshold */ #define E1000_DOBFFCTL_EXIT_ACT_MASK 0x01000000 /* Exit active CB */ @@ -1446,4 +1469,8 @@ /* Lan ID bit field offset in status register */ #define E1000_STATUS_LAN_ID_OFFSET 2 #define E1000_VFTA_ENTRIES 128 +#define E1000_UNUSEDARG +#ifndef ERROR_REPORT +#define ERROR_REPORT(fmt) do { } while (0) +#endif /* ERROR_REPORT */ #endif /* _E1000_DEFINES_H_ */ diff --git a/usr/src/uts/common/io/e1000api/e1000_hw.h b/usr/src/uts/common/io/e1000api/e1000_hw.h index 7e8365a5a0..9ceb0b891f 100644 --- a/usr/src/uts/common/io/e1000api/e1000_hw.h +++ b/usr/src/uts/common/io/e1000api/e1000_hw.h @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2014, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -137,6 +137,10 @@ struct e1000_hw; #define E1000_DEV_ID_PCH_LPT_I217_V 0x153B #define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A #define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559 +#define E1000_DEV_ID_PCH_I218_LM2 0x15A0 +#define E1000_DEV_ID_PCH_I218_V2 0x15A1 +#define E1000_DEV_ID_PCH_I218_LM3 0x15A2 /* Wildcat Point PCH */ +#define E1000_DEV_ID_PCH_I218_V3 0x15A3 /* Wildcat Point PCH */ #define E1000_DEV_ID_82576 0x10C9 #define E1000_DEV_ID_82576_FIBER 0x10E6 #define E1000_DEV_ID_82576_SERDES 0x10E7 @@ -169,10 +173,12 @@ struct e1000_hw; #define E1000_DEV_ID_I210_FIBER 0x1536 #define E1000_DEV_ID_I210_SERDES 0x1537 #define E1000_DEV_ID_I210_SGMII 0x1538 +#define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B +#define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C #define E1000_DEV_ID_I211_COPPER 0x1539 #define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40 #define E1000_DEV_ID_I354_SGMII 0x1F41 -#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45 +#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45 #define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 #define E1000_DEV_ID_DH89XXCC_SERDES 0x043A #define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C @@ -246,6 +252,7 @@ enum e1000_nvm_type { e1000_nvm_eeprom_spi, e1000_nvm_eeprom_microwire, e1000_nvm_flash_hw, + e1000_nvm_invm, e1000_nvm_flash_sw }; @@ -360,6 +367,9 @@ enum e1000_serdes_link_state { e1000_serdes_link_forced_up }; +#define __le16 u16 +#define __le32 u32 +#define __le64 u64 /* Receive Descriptor */ struct e1000_rx_desc { __le64 buffer_addr; /* Address of the descriptor's data buffer */ @@ -396,6 +406,10 @@ union e1000_rx_desc_extended { }; #define MAX_PS_BUFFERS 4 + +/* Number of packet split data buffers (not including the header buffer) */ +#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) + /* Receive Descriptor - Packet Split */ union e1000_rx_desc_packet_split { struct { @@ -420,7 +434,8 @@ union e1000_rx_desc_packet_split { } middle; struct { __le16 header_status; - __le16 length[3]; /* length of buffers 1-3 */ + /* length of buffers 1-3 */ + __le16 length[PS_PAGE_BUFFERS]; } upper; __le64 reserved; } 
wb; /* writeback */ @@ -689,7 +704,7 @@ struct e1000_mac_operations { s32 (*setup_led)(struct e1000_hw *); void (*write_vfta)(struct e1000_hw *, u32, u32); void (*config_collision_dist)(struct e1000_hw *); - void (*rar_set)(struct e1000_hw *, u8*, u32); + int (*rar_set)(struct e1000_hw *, u8*, u32); s32 (*read_mac_addr)(struct e1000_hw *); s32 (*validate_mdi_setting)(struct e1000_hw *); s32 (*set_obff_timer)(struct e1000_hw *, u32); @@ -932,6 +947,13 @@ struct e1000_shadow_ram { #define E1000_SHADOW_RAM_WORDS 2048 +/* I218 PHY Ultra Low Power (ULP) states */ +enum e1000_ulp_state { + e1000_ulp_state_unknown, + e1000_ulp_state_off, + e1000_ulp_state_on, +}; + struct e1000_dev_spec_ich8lan { bool kmrn_lock_loss_workaround_enabled; struct e1000_shadow_ram shadow_ram[E1000_SHADOW_RAM_WORDS]; @@ -940,6 +962,7 @@ struct e1000_dev_spec_ich8lan { bool nvm_k1_enabled; bool eee_disable; u16 eee_lp_ability; + enum e1000_ulp_state ulp_state; }; struct e1000_dev_spec_82575 { @@ -950,6 +973,8 @@ struct e1000_dev_spec_82575 { bool clear_semaphore_once; u32 mtu; struct sfp_e1000_flags eth_flags; + u8 media_port; + bool media_changed; }; struct e1000_dev_spec_vf { diff --git a/usr/src/uts/common/io/e1000api/e1000_i210.c b/usr/src/uts/common/io/e1000api/e1000_i210.c index 76e70ce141..f12c13f0ca 100644 --- a/usr/src/uts/common/io/e1000api/e1000_i210.c +++ b/usr/src/uts/common/io/e1000api/e1000_i210.c @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2014, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -42,8 +42,6 @@ static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw); static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data); -static s32 e1000_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words, - u16 *data); /** * e1000_acquire_nvm_i210 - Request for access to EEPROM @@ -180,9 +178,8 @@ static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw) } if (i == timeout) { - /* - * In rare circumstances, the driver may not have released the - * SW semaphore. Clear the semaphore once before giving up. + /* In rare circumstances, the SW semaphore may already be held + * unintentionally. Clear the semaphore once before giving up. */ if (hw->dev_spec._82575.clear_semaphore_once) { hw->dev_spec._82575.clear_semaphore_once = FALSE; @@ -368,60 +365,104 @@ out: return ret_val; } -/** - * e1000_read_nvm_i211 - Read NVM wrapper function for I211 +/** e1000_read_invm_word_i210 - Reads OTP + * @hw: pointer to the HW structure + * @address: the word address (aka eeprom offset) to read + * @data: pointer to the data read + * + * Reads 16-bit words from the OTP. Return error when the word is not + * stored in OTP. 
+ **/ +static s32 e1000_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data) +{ + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u32 invm_dword; + u16 i; + u8 record_type, word_address; + + DEBUGFUNC("e1000_read_invm_word_i210"); + + for (i = 0; i < E1000_INVM_SIZE; i++) { + invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i)); + /* Get record type */ + record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword); + if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE) + break; + if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE) + i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS; + if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE) + i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS; + if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) { + word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); + if (word_address == address) { + *data = INVM_DWORD_TO_WORD_DATA(invm_dword); + DEBUGOUT2("Read INVM Word 0x%02x = %x", + address, *data); + status = E1000_SUCCESS; + break; + } + } + } + if (status != E1000_SUCCESS) + DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address); + return status; +} + +/** e1000_read_invm_i210 - Read invm wrapper function for I210/I211 * @hw: pointer to the HW structure * @address: the word address (aka eeprom offset) to read * @data: pointer to the data read * * Wrapper function to return data formerly found in the NVM. **/ -static s32 e1000_read_nvm_i211(struct e1000_hw *hw, u16 offset, - u16 words, u16 *data) +static s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset, + u16 E1000_UNUSEDARG words, u16 *data) { s32 ret_val = E1000_SUCCESS; - DEBUGFUNC("e1000_read_nvm_i211"); + DEBUGFUNC("e1000_read_invm_i210"); /* Only the MAC addr is required to be present in the iNVM */ switch (offset) { case NVM_MAC_ADDR: - ret_val = e1000_read_invm_i211(hw, (u8)offset, &data[0]); - ret_val |= e1000_read_invm_i211(hw, (u8)offset+1, &data[1]); - ret_val |= e1000_read_invm_i211(hw, (u8)offset+2, &data[2]); + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, &data[0]); + ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+1, + &data[1]); + ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+2, + &data[2]); if (ret_val != E1000_SUCCESS) DEBUGOUT("MAC Addr not found in iNVM\n"); break; case NVM_INIT_CTRL_2: - ret_val = e1000_read_invm_i211(hw, (u8)offset, data); + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); if (ret_val != E1000_SUCCESS) { *data = NVM_INIT_CTRL_2_DEFAULT_I211; ret_val = E1000_SUCCESS; } break; case NVM_INIT_CTRL_4: - ret_val = e1000_read_invm_i211(hw, (u8)offset, data); + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); if (ret_val != E1000_SUCCESS) { *data = NVM_INIT_CTRL_4_DEFAULT_I211; ret_val = E1000_SUCCESS; } break; case NVM_LED_1_CFG: - ret_val = e1000_read_invm_i211(hw, (u8)offset, data); + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); if (ret_val != E1000_SUCCESS) { *data = NVM_LED_1_CFG_DEFAULT_I211; ret_val = E1000_SUCCESS; } break; case NVM_LED_0_2_CFG: - ret_val = e1000_read_invm_i211(hw, (u8)offset, data); + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); if (ret_val != E1000_SUCCESS) { *data = NVM_LED_0_2_CFG_DEFAULT_I211; ret_val = E1000_SUCCESS; } break; case NVM_ID_LED_SETTINGS: - ret_val = e1000_read_invm_i211(hw, (u8)offset, data); + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); if (ret_val != E1000_SUCCESS) { *data = ID_LED_RESERVED_FFFF; ret_val = E1000_SUCCESS; @@ -448,50 +489,6 @@ static s32 e1000_read_nvm_i211(struct e1000_hw *hw, u16 offset, } /** - 
* e1000_read_invm_i211 - Reads OTP - * @hw: pointer to the HW structure - * @address: the word address (aka eeprom offset) to read - * @data: pointer to the data read - * - * Reads 16-bit words from the OTP. Return error when the word is not - * stored in OTP. - **/ -s32 e1000_read_invm_i211(struct e1000_hw *hw, u8 address, u16 *data) -{ - s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; - u32 invm_dword; - u16 i; - u8 record_type, word_address; - - DEBUGFUNC("e1000_read_invm_i211"); - - for (i = 0; i < E1000_INVM_SIZE; i++) { - invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i)); - /* Get record type */ - record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword); - if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE) - break; - if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE) - i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS; - if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE) - i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS; - if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) { - word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); - if (word_address == address) { - *data = INVM_DWORD_TO_WORD_DATA(invm_dword); - DEBUGOUT2("Read INVM Word 0x%02x = %x", - address, *data); - status = E1000_SUCCESS; - break; - } - } - } - if (status != E1000_SUCCESS) - DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address); - return status; -} - -/** * e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum * @hw: pointer to the HW structure * @@ -539,7 +536,7 @@ s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw) **/ s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw) { - s32 ret_val = E1000_SUCCESS; + s32 ret_val; u16 checksum = 0; u16 i, nvm_data; @@ -592,13 +589,33 @@ out: } /** + * e1000_get_flash_presence_i210 - Check if flash device is detected. + * @hw: pointer to the HW structure + * + **/ +bool e1000_get_flash_presence_i210(struct e1000_hw *hw) +{ + u32 eec = 0; + bool ret_val = FALSE; + + DEBUGFUNC("e1000_get_flash_presence_i210"); + + eec = E1000_READ_REG(hw, E1000_EECD); + + if (eec & E1000_EECD_FLASH_DETECTED_I210) + ret_val = TRUE; + + return ret_val; +} + +/** * e1000_update_flash_i210 - Commit EEPROM to the flash * @hw: pointer to the HW structure * **/ s32 e1000_update_flash_i210(struct e1000_hw *hw) { - s32 ret_val = E1000_SUCCESS; + s32 ret_val; u32 flup; DEBUGFUNC("e1000_update_flash_i210"); @@ -650,52 +667,36 @@ s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw) * e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers * @hw: pointer to the HW structure * - * Initialize the i210 NVM parameters and function pointers. + * Initialize the i210/i211 NVM parameters and function pointers. 
**/ static s32 e1000_init_nvm_params_i210(struct e1000_hw *hw) { - s32 ret_val = E1000_SUCCESS; + s32 ret_val; struct e1000_nvm_info *nvm = &hw->nvm; DEBUGFUNC("e1000_init_nvm_params_i210"); ret_val = e1000_init_nvm_params_82575(hw); - nvm->ops.acquire = e1000_acquire_nvm_i210; nvm->ops.release = e1000_release_nvm_i210; - nvm->ops.read = e1000_read_nvm_srrd_i210; - nvm->ops.write = e1000_write_nvm_srwr_i210; nvm->ops.valid_led_default = e1000_valid_led_default_i210; - nvm->ops.validate = e1000_validate_nvm_checksum_i210; - nvm->ops.update = e1000_update_nvm_checksum_i210; - + if (e1000_get_flash_presence_i210(hw)) { + hw->nvm.type = e1000_nvm_flash_hw; + nvm->ops.read = e1000_read_nvm_srrd_i210; + nvm->ops.write = e1000_write_nvm_srwr_i210; + nvm->ops.validate = e1000_validate_nvm_checksum_i210; + nvm->ops.update = e1000_update_nvm_checksum_i210; + } else { + hw->nvm.type = e1000_nvm_invm; + nvm->ops.read = e1000_read_invm_i210; + nvm->ops.write = e1000_null_write_nvm; + nvm->ops.validate = e1000_null_ops_generic; + nvm->ops.update = e1000_null_ops_generic; + } return ret_val; } /** - * e1000_init_nvm_params_i211 - Initialize i211 NVM function pointers - * @hw: pointer to the HW structure - * - * Initialize the NVM parameters and function pointers for i211. - **/ -static s32 e1000_init_nvm_params_i211(struct e1000_hw *hw) -{ - struct e1000_nvm_info *nvm = &hw->nvm; - - DEBUGFUNC("e1000_init_nvm_params_i211"); - - nvm->ops.acquire = e1000_acquire_nvm_i210; - nvm->ops.release = e1000_release_nvm_i210; - nvm->ops.read = e1000_read_nvm_i211; - nvm->ops.valid_led_default = e1000_valid_led_default_i210; - nvm->ops.write = e1000_null_write_nvm; - nvm->ops.validate = e1000_null_ops_generic; - nvm->ops.update = e1000_null_ops_generic; - - return E1000_SUCCESS; -} - -/** * e1000_init_function_pointers_i210 - Init func ptrs. * @hw: pointer to the HW structure * @@ -704,17 +705,8 @@ static s32 e1000_init_nvm_params_i211(struct e1000_hw *hw) void e1000_init_function_pointers_i210(struct e1000_hw *hw) { e1000_init_function_pointers_82575(hw); + hw->nvm.ops.init_params = e1000_init_nvm_params_i210; - switch (hw->mac.type) { - case e1000_i210: - hw->nvm.ops.init_params = e1000_init_nvm_params_i210; - break; - case e1000_i211: - hw->nvm.ops.init_params = e1000_init_nvm_params_i211; - break; - default: - break; - } return; } @@ -764,7 +756,7 @@ out: static s32 __e1000_access_xmdio_reg(struct e1000_hw *hw, u16 address, u8 dev_addr, u16 *data, bool read) { - s32 ret_val = E1000_SUCCESS; + s32 ret_val; DEBUGFUNC("__e1000_access_xmdio_reg"); @@ -824,4 +816,89 @@ s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data) return __e1000_access_xmdio_reg(hw, addr, dev_addr, &data, FALSE); } +/** + * e1000_pll_workaround_i210 + * @hw: pointer to the HW structure + * + * Works around an errata in the PLL circuit where it occasionally + * provides the wrong clock frequency after power up. 
+ **/ +static s32 e1000_pll_workaround_i210(struct e1000_hw *hw) +{ + s32 ret_val; + u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val; + u16 nvm_word, phy_word, pci_word, tmp_nvm; + int i; + + /* Get and set needed register values */ + wuc = E1000_READ_REG(hw, E1000_WUC); + mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG); + reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO; + E1000_WRITE_REG(hw, E1000_MDICNFG, reg_val); + + /* Get data from NVM, or set default */ + ret_val = e1000_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD, + &nvm_word); + if (ret_val != E1000_SUCCESS) + nvm_word = E1000_INVM_DEFAULT_AL; + tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL; + for (i = 0; i < E1000_MAX_PLL_TRIES; i++) { + /* check current state directly from internal PHY */ + e1000_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE | + E1000_PHY_PLL_FREQ_REG), &phy_word); + if ((phy_word & E1000_PHY_PLL_UNCONF) + != E1000_PHY_PLL_UNCONF) { + ret_val = E1000_SUCCESS; + break; + } else { + ret_val = -E1000_ERR_PHY; + } + /* directly reset the internal PHY */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl|E1000_CTRL_PHY_RST); + + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + + E1000_WRITE_REG(hw, E1000_WUC, 0); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16); + E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val); + + e1000_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + pci_word |= E1000_PCI_PMCSR_D3; + e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + msec_delay(1); + pci_word &= ~E1000_PCI_PMCSR_D3; + e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16); + E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val); + + /* restore WUC register */ + E1000_WRITE_REG(hw, E1000_WUC, wuc); + } + /* restore MDICNFG setting */ + E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg); + return ret_val; +} + +/** + * e1000_init_hw_i210 - Init hw for I210/I211 + * @hw: pointer to the HW structure + * + * Called to initialize hw for i210 hw family. + **/ +s32 e1000_init_hw_i210(struct e1000_hw *hw) +{ + s32 ret_val; + DEBUGFUNC("e1000_init_hw_i210"); + if ((hw->mac.type >= e1000_i210) && + !(e1000_get_flash_presence_i210(hw))) { + ret_val = e1000_pll_workaround_i210(hw); + if (ret_val != E1000_SUCCESS) + return ret_val; + } + ret_val = e1000_init_hw_82575(hw); + return ret_val; +} diff --git a/usr/src/uts/common/io/e1000api/e1000_i210.h b/usr/src/uts/common/io/e1000api/e1000_i210.h index c858c56d5b..2a20ca1e6e 100644 --- a/usr/src/uts/common/io/e1000api/e1000_i210.h +++ b/usr/src/uts/common/io/e1000api/e1000_i210.h @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2014, Intel Corporation All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -35,10 +35,7 @@ #ifndef _E1000_I210_H_ #define _E1000_I210_H_ -#ifdef __cplusplus -extern "C" { -#endif - +bool e1000_get_flash_presence_i210(struct e1000_hw *hw); s32 e1000_update_flash_i210(struct e1000_hw *hw); s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw); s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw); @@ -46,13 +43,13 @@ s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); -s32 e1000_read_invm_i211(struct e1000_hw *hw, u8 address, u16 *data); s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask); void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask); s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data); s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data); +s32 e1000_init_hw_i210(struct e1000_hw *hw); #define E1000_STM_OPCODE 0xDB00 #define E1000_EEPROM_FLASH_SIZE_WORD 0x11 @@ -90,7 +87,7 @@ enum E1000_INVM_STRUCTURE_TYPE { (ID_LED_OFF1_OFF2)) #define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \ (ID_LED_DEF1_DEF2 << 4) | \ - (ID_LED_DEF1_DEF2)) + (ID_LED_OFF1_ON2)) /* NVM offset defaults for I211 devices */ #define NVM_INIT_CTRL_2_DEFAULT_I211 0X7243 @@ -98,8 +95,15 @@ enum E1000_INVM_STRUCTURE_TYPE { #define NVM_LED_1_CFG_DEFAULT_I211 0x0184 #define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C -#ifdef __cplusplus -} -#endif +/* PLL Defines */ +#define E1000_PCI_PMCSR 0x44 +#define E1000_PCI_PMCSR_D3 0x03 +#define E1000_MAX_PLL_TRIES 5 +#define E1000_PHY_PLL_UNCONF 0xFF +#define E1000_PHY_PLL_FREQ_PAGE 0xFC0000 +#define E1000_PHY_PLL_FREQ_REG 0x000E +#define E1000_INVM_DEFAULT_AL 0x202F +#define E1000_INVM_AUTOLOAD 0x0A +#define E1000_INVM_PLL_WO_VAL 0x0010 -#endif /* _E1000_I210_H_ */ +#endif diff --git a/usr/src/uts/common/io/e1000api/e1000_ich8lan.c b/usr/src/uts/common/io/e1000api/e1000_ich8lan.c index 1c9f93f544..14c5689edb 100644 --- a/usr/src/uts/common/io/e1000api/e1000_ich8lan.c +++ b/usr/src/uts/common/io/e1000api/e1000_ich8lan.c @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2014, Intel Corporation All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -59,6 +59,14 @@ * 82578DC Gigabit Network Connection * 82579LM Gigabit Network Connection * 82579V Gigabit Network Connection + * Ethernet Connection I217-LM + * Ethernet Connection I217-V + * Ethernet Connection I218-V + * Ethernet Connection I218-LM + * Ethernet Connection (2) I218-LM + * Ethernet Connection (2) I218-V + * Ethernet Connection (3) I218-LM + * Ethernet Connection (3) I218-V */ #include "e1000_api.h" @@ -69,8 +77,9 @@ static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw); static void e1000_release_nvm_ich8lan(struct e1000_hw *hw); static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); -static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index); -static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index); +static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index); +static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index); +static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw); static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw, u8 *mc_addr_list, u32 mc_addr_count); @@ -181,8 +190,9 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) { u16 phy_reg = 0; u32 phy_id = 0; - s32 ret_val; + s32 ret_val = 0; u16 retry_count; + u32 mac_reg = 0; for (retry_count = 0; retry_count < 2; retry_count++) { ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg); @@ -201,23 +211,84 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) if (hw->phy.id) { if (hw->phy.id == phy_id) - return TRUE; + goto out; } else if (phy_id) { hw->phy.id = phy_id; hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK); - return TRUE; + goto out; } /* In case the PHY needs to be in mdio slow mode, * set slow mode and try to get the PHY id again. */ - hw->phy.ops.release(hw); - ret_val = e1000_set_mdio_slow_mode_hv(hw); - if (!ret_val) - ret_val = e1000_get_phy_id(hw); - hw->phy.ops.acquire(hw); + if (hw->mac.type < e1000_pch_lpt) { + hw->phy.ops.release(hw); + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (!ret_val) + ret_val = e1000_get_phy_id(hw); + hw->phy.ops.acquire(hw); + } + + if (ret_val) + return FALSE; +out: + if (hw->mac.type == e1000_pch_lpt) { + /* Unforce SMBus mode in PHY */ + hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg); + phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; + hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg); + + /* Unforce SMBus mode in MAC */ + mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); + } + + return TRUE; +} + +/** + * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value + * @hw: pointer to the HW structure + * + * Toggling the LANPHYPC pin value fully power-cycles the PHY and is + * used to reset the PHY to a quiescent state when necessary. 
+ **/ +static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw) +{ + u32 mac_reg; + + DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt"); + + /* Set Phy Config Counter to 50msec */ + mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3); + mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; + mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; + E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg); + + /* Toggle LANPHYPC Value bit */ + mac_reg = E1000_READ_REG(hw, E1000_CTRL); + mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE; + mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE; + E1000_WRITE_REG(hw, E1000_CTRL, mac_reg); + E1000_WRITE_FLUSH(hw); + usec_delay(10); + mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE; + E1000_WRITE_REG(hw, E1000_CTRL, mac_reg); + E1000_WRITE_FLUSH(hw); + + if (hw->mac.type < e1000_pch_lpt) { + msec_delay(50); + } else { + u16 count = 20; + + do { + msec_delay(5); + } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) & + E1000_CTRL_EXT_LPCD) && count--); - return !ret_val; + msec_delay(30); + } } /** @@ -231,7 +302,6 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) { u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM); s32 ret_val; - u16 phy_reg; DEBUGFUNC("e1000_init_phy_workarounds_pchlan"); @@ -240,6 +310,12 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) */ e1000_gate_hw_phy_config_ich8lan(hw, TRUE); + /* It is not possible to be certain of the current state of ULP + * so forcibly disable it. + */ + hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown; + e1000_disable_ulp_lpt_lp(hw, TRUE); + ret_val = hw->phy.ops.acquire(hw); if (ret_val) { DEBUGOUT("Failed to initialize PHY flow\n"); @@ -262,24 +338,16 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); + /* Wait 50 milliseconds for MAC to finish any retries + * that it might be trying to perform from previous + * attempts to acknowledge any phy read requests. 
+ */ + msec_delay(50); + /* fall-through */ case e1000_pch2lan: - if (e1000_phy_is_accessible_pchlan(hw)) { - if (hw->mac.type == e1000_pch_lpt) { - /* Unforce SMBus mode in PHY */ - hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, - &phy_reg); - phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; - hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, - phy_reg); - - /* Unforce SMBus mode in MAC */ - mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT); - mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; - E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); - } + if (e1000_phy_is_accessible_pchlan(hw)) break; - } /* fall-through */ case e1000_pchlan: @@ -289,44 +357,27 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) if (hw->phy.ops.check_reset_block(hw)) { DEBUGOUT("Required LANPHYPC toggle blocked by ME\n"); + ret_val = -E1000_ERR_PHY; break; } - DEBUGOUT("Toggling LANPHYPC\n"); - - /* Set Phy Config Counter to 50msec */ - mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3); - mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; - mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; - E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg); + /* Toggle LANPHYPC Value bit */ + e1000_toggle_lanphypc_pch_lpt(hw); + if (hw->mac.type >= e1000_pch_lpt) { + if (e1000_phy_is_accessible_pchlan(hw)) + break; - if (hw->mac.type == e1000_pch_lpt) { /* Toggling LANPHYPC brings the PHY out of SMBus mode - * So ensure that the MAC is also out of SMBus mode + * so ensure that the MAC is also out of SMBus mode */ mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT); mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); - } - /* Toggle LANPHYPC Value bit */ - mac_reg = E1000_READ_REG(hw, E1000_CTRL); - mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE; - mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE; - E1000_WRITE_REG(hw, E1000_CTRL, mac_reg); - E1000_WRITE_FLUSH(hw); - usec_delay(10); - mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE; - E1000_WRITE_REG(hw, E1000_CTRL, mac_reg); - E1000_WRITE_FLUSH(hw); - if (hw->mac.type < e1000_pch_lpt) { - msec_delay(50); - } else { - u16 count = 20; - do { - msec_delay(5); - } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) & - E1000_CTRL_EXT_LPCD) && count--); + if (e1000_phy_is_accessible_pchlan(hw)) + break; + + ret_val = -E1000_ERR_PHY; } break; default: @@ -334,13 +385,33 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) } hw->phy.ops.release(hw); + if (!ret_val) { - /* Reset the PHY before any access to it. Doing so, ensures - * that the PHY is in a known good state before we read/write - * PHY registers. The generic reset is sufficient here, - * because we haven't determined the PHY type yet. - */ - ret_val = e1000_phy_hw_reset_generic(hw); + /* Check to see if able to reset PHY. Print error if not */ + if (hw->phy.ops.check_reset_block(hw)) { + ERROR_REPORT("Reset blocked by ME\n"); + goto out; + } + + /* Reset the PHY before any access to it. Doing so, ensures + * that the PHY is in a known good state before we read/write + * PHY registers. The generic reset is sufficient here, + * because we haven't determined the PHY type yet. + */ + ret_val = e1000_phy_hw_reset_generic(hw); + if (ret_val) + goto out; + + /* On a successful reset, possibly need to wait for the PHY + * to quiesce to an accessible state before returning control + * to the calling function. If the PHY does not quiesce, then + * return E1000E_BLK_PHY_RESET, as this is the condition that + * the PHY is in. 
+ */ + ret_val = hw->phy.ops.check_reset_block(hw); + if (ret_val) + ERROR_REPORT("ME blocked access to PHY after reset\n"); + } out: /* Ungate automatic PHY configuration on non-managed 82579 */ @@ -550,13 +621,12 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) DEBUGFUNC("e1000_init_nvm_params_ich8lan"); /* Can't read flash registers if the register set isn't mapped. */ + nvm->type = e1000_nvm_flash_sw; if (!hw->flash_address) { DEBUGOUT("ERROR: Flash registers not mapped\n"); return -E1000_ERR_CONFIG; } - nvm->type = e1000_nvm_flash_sw; - gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG); /* sector_X_addr is a "sector"-aligned address (4096 bytes) @@ -572,8 +642,8 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) /* find total size of the NVM, then cut in half since the total * size represents two separate NVM banks. */ - nvm->flash_bank_size = (sector_end_addr - sector_base_addr) - << FLASH_SECTOR_ADDR_SHIFT; + nvm->flash_bank_size = ((sector_end_addr - sector_base_addr) + << FLASH_SECTOR_ADDR_SHIFT); nvm->flash_bank_size /= 2; /* Adjust to word count */ nvm->flash_bank_size /= sizeof(u16); @@ -766,7 +836,7 @@ s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data) * * Assumes the SW/FW/HW Semaphore is already acquired. **/ -static s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data) +s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data) { DEBUGFUNC("e1000_read_emi_reg_locked"); @@ -780,18 +850,35 @@ static s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data) * Enable/disable EEE based on setting in dev_spec structure, the duplex of * the link and the EEE capabilities of the link partner. The LPI Control * register bits will remain set only if/when link is up. + * + * EEE LPI must not be asserted earlier than one second after link is up. + * On 82579, EEE LPI should not be enabled until such time otherwise there + * can be link issues with some switches. Other devices can have EEE LPI + * enabled immediately upon link up since they have a timer in hardware which + * prevents LPI from being asserted too early. 
**/ -static s32 e1000_set_eee_pchlan(struct e1000_hw *hw) +s32 e1000_set_eee_pchlan(struct e1000_hw *hw) { struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; s32 ret_val; - u16 lpi_ctrl; + u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data; DEBUGFUNC("e1000_set_eee_pchlan"); - if ((hw->phy.type != e1000_phy_82579) && - (hw->phy.type != e1000_phy_i217)) + switch (hw->phy.type) { + case e1000_phy_82579: + lpa = I82579_EEE_LP_ABILITY; + pcs_status = I82579_EEE_PCS_STATUS; + adv_addr = I82579_EEE_ADVERTISEMENT; + break; + case e1000_phy_i217: + lpa = I217_EEE_LP_ABILITY; + pcs_status = I217_EEE_PCS_STATUS; + adv_addr = I217_EEE_ADVERTISEMENT; + break; + default: return E1000_SUCCESS; + } ret_val = hw->phy.ops.acquire(hw); if (ret_val) @@ -806,34 +893,24 @@ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw) /* Enable EEE if not disabled by user */ if (!dev_spec->eee_disable) { - u16 lpa, pcs_status, data; - /* Save off link partner's EEE ability */ - switch (hw->phy.type) { - case e1000_phy_82579: - lpa = I82579_EEE_LP_ABILITY; - pcs_status = I82579_EEE_PCS_STATUS; - break; - case e1000_phy_i217: - lpa = I217_EEE_LP_ABILITY; - pcs_status = I217_EEE_PCS_STATUS; - break; - default: - ret_val = -E1000_ERR_PHY; - goto release; - } ret_val = e1000_read_emi_reg_locked(hw, lpa, &dev_spec->eee_lp_ability); if (ret_val) goto release; + /* Read EEE advertisement */ + ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv); + if (ret_val) + goto release; + /* Enable EEE only for speeds in which the link partner is - * EEE capable. + * EEE capable and for which we advertise EEE. */ - if (dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED) + if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED) lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE; - if (dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) { + if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) { hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data); if (data & NWAY_LPAR_100TX_FD_CAPS) lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE; @@ -845,13 +922,24 @@ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw) dev_spec->eee_lp_ability &= ~I82579_EEE_100_SUPPORTED; } + } - /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */ - ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data); + if (hw->phy.type == e1000_phy_82579) { + ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT, + &data); if (ret_val) goto release; + + data &= ~I82579_LPI_100_PLL_SHUT; + ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT, + data); } + /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */ + ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data); + if (ret_val) + goto release; + ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl); release: hw->phy.ops.release(hw); @@ -867,30 +955,31 @@ release: * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications * preventing further DMA write requests. Workaround the issue by disabling * the de-assertion of the clock request when in 1Gpbs mode. + * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link + * speeds in order to avoid Tx hangs. 
**/ static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link) { u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6); + u32 status = E1000_READ_REG(hw, E1000_STATUS); s32 ret_val = E1000_SUCCESS; + u16 reg; - if (link && (E1000_READ_REG(hw, E1000_STATUS) & - E1000_STATUS_SPEED_1000)) { - u16 kmrn_reg; - + if (link && (status & E1000_STATUS_SPEED_1000)) { ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, - &kmrn_reg); + &reg); if (ret_val) goto release; ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, - kmrn_reg & + reg & ~E1000_KMRNCTRLSTA_K1_ENABLE); if (ret_val) goto release; @@ -903,13 +992,45 @@ static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link) ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, - kmrn_reg); + reg); release: hw->phy.ops.release(hw); } else { /* clear FEXTNVM6 bit 8 on link down or 10/100 */ - E1000_WRITE_REG(hw, E1000_FEXTNVM6, - fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); + fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK; + + if (!link || ((status & E1000_STATUS_SPEED_100) && + (status & E1000_STATUS_FD))) + goto update_fextnvm6; + + ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg); + if (ret_val) + return ret_val; + + /* Clear link status transmit timeout */ + reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK; + + if (status & E1000_STATUS_SPEED_100) { + /* Set inband Tx timeout to 5x10us for 100Half */ + reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; + + /* Do not extend the K1 entry latency for 100Half */ + fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; + } else { + /* Set inband Tx timeout to 50x10us for 10Full/Half */ + reg |= 50 << + I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; + + /* Extend the K1 entry latency for 10 Mbps */ + fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; + } + + ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg); + if (ret_val) + return ret_val; + +update_fextnvm6: + E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6); } return ret_val; @@ -1018,7 +1139,6 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) lat_ns /= 1000000000; obff_hwm = (s32)(rxa - lat_ns); } - if ((obff_hwm < 0) || (obff_hwm > E1000_SVT_OFF_HWM_MASK)) { DEBUGOUT1("Invalid high water mark %d\n", obff_hwm); return -E1000_ERR_CONFIG; @@ -1079,6 +1199,256 @@ static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr) } /** + * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP + * @hw: pointer to the HW structure + * @to_sx: boolean indicating a system power state transition to Sx + * + * When link is down, configure ULP mode to significantly reduce the power + * to the PHY. If on a Manageability Engine (ME) enabled system, tell the + * ME firmware to start the ULP configuration. If not on an ME enabled + * system, configure the ULP mode by software.
+ */ +s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) +{ + u32 mac_reg; + s32 ret_val = E1000_SUCCESS; + u16 phy_reg; + + if ((hw->mac.type < e1000_pch_lpt) || + (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) || + (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) || + (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) || + (hw->device_id == E1000_DEV_ID_PCH_I218_V2) || + (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on)) + return 0; + + if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) { + /* Request ME configure ULP mode in the PHY */ + mac_reg = E1000_READ_REG(hw, E1000_H2ME); + mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS; + E1000_WRITE_REG(hw, E1000_H2ME, mac_reg); + + goto out; + } + + if (!to_sx) { + int i = 0; + + /* Poll up to 5 seconds for Cable Disconnected indication */ + while (!(E1000_READ_REG(hw, E1000_FEXT) & + E1000_FEXT_PHY_CABLE_DISCONNECTED)) { + /* Bail if link is re-acquired */ + if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU) + return -E1000_ERR_PHY; + + if (i++ == 100) + break; + + msec_delay(50); + } + DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n", + (E1000_READ_REG(hw, E1000_FEXT) & + E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not", + i * 50); + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + /* Force SMBus mode in PHY */ + ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); + if (ret_val) + goto release; + phy_reg |= CV_SMB_CTRL_FORCE_SMBUS; + e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg); + + /* Force SMBus mode in MAC */ + mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); + + /* Set Inband ULP Exit, Reset to SMBus mode and + * Disable SMBus Release on PERST# in PHY + */ + ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg); + if (ret_val) + goto release; + phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS | + I218_ULP_CONFIG1_DISABLE_SMB_PERST); + if (to_sx) { + if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC) + phy_reg |= I218_ULP_CONFIG1_WOL_HOST; + + phy_reg |= I218_ULP_CONFIG1_STICKY_ULP; + } else { + phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT; + } + e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); + + /* Set Disable SMBus Release on PERST# in MAC */ + mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7); + mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST; + E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg); + + /* Commit ULP changes in PHY by starting auto ULP configuration */ + phy_reg |= I218_ULP_CONFIG1_START; + e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); +release: + hw->phy.ops.release(hw); +out: + if (ret_val) { + DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val); + } else + hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on; + + return ret_val; +} + +/** + * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP + * @hw: pointer to the HW structure + * @force: boolean indicating whether or not to force disabling ULP + * + * Un-configure ULP mode when link is up, the system is transitioned from + * Sx or the driver is unloaded. If on a Manageability Engine (ME) enabled + * system, poll for an indication from ME that ULP has been un-configured. + * If not on an ME enabled system, un-configure the ULP mode by software. 
+ * + * During nominal operation, this function is called when link is acquired + * to disable ULP mode (force=FALSE); otherwise, for example when unloading + * the driver or during Sx->S0 transitions, this is called with force=TRUE + * to forcibly disable ULP. + */ +s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force) +{ + s32 ret_val = E1000_SUCCESS; + u32 mac_reg; + u16 phy_reg; + int i = 0; + + if ((hw->mac.type < e1000_pch_lpt) || + (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) || + (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) || + (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) || + (hw->device_id == E1000_DEV_ID_PCH_I218_V2) || + (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off)) + return 0; + + if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) { + if (force) { + /* Request ME un-configure ULP mode in the PHY */ + mac_reg = E1000_READ_REG(hw, E1000_H2ME); + mac_reg &= ~E1000_H2ME_ULP; + mac_reg |= E1000_H2ME_ENFORCE_SETTINGS; + E1000_WRITE_REG(hw, E1000_H2ME, mac_reg); + } + + /* Poll up to 100msec for ME to clear ULP_CFG_DONE */ + while (E1000_READ_REG(hw, E1000_FWSM) & + E1000_FWSM_ULP_CFG_DONE) { + if (i++ == 10) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + msec_delay(10); + } + DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10); + + if (force) { + mac_reg = E1000_READ_REG(hw, E1000_H2ME); + mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS; + E1000_WRITE_REG(hw, E1000_H2ME, mac_reg); + } else { + /* Clear H2ME.ULP after ME ULP configuration */ + mac_reg = E1000_READ_REG(hw, E1000_H2ME); + mac_reg &= ~E1000_H2ME_ULP; + E1000_WRITE_REG(hw, E1000_H2ME, mac_reg); + } + + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + if (force) + /* Toggle LANPHYPC Value bit */ + e1000_toggle_lanphypc_pch_lpt(hw); + + /* Unforce SMBus mode in PHY */ + ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); + if (ret_val) { + /* The MAC might be in PCIe mode, so temporarily force to + * SMBus mode in order to access the PHY. + */ + mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); + + msec_delay(50); + + ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, + &phy_reg); + if (ret_val) + goto release; + } + phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; + e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg); + + /* Unforce SMBus mode in MAC */ + mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); + + /* When ULP mode was previously entered, K1 was disabled by the + * hardware. Re-Enable K1 in the PHY when exiting ULP. 
+ */ + ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg); + if (ret_val) + goto release; + phy_reg |= HV_PM_CTRL_K1_ENABLE; + e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg); + + /* Clear ULP enabled configuration */ + ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg); + if (ret_val) + goto release; + phy_reg &= ~(I218_ULP_CONFIG1_IND | + I218_ULP_CONFIG1_STICKY_ULP | + I218_ULP_CONFIG1_RESET_TO_SMBUS | + I218_ULP_CONFIG1_WOL_HOST | + I218_ULP_CONFIG1_INBAND_EXIT | + I218_ULP_CONFIG1_DISABLE_SMB_PERST); + e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); + + /* Commit ULP changes by starting auto ULP configuration */ + phy_reg |= I218_ULP_CONFIG1_START; + e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); + + /* Clear Disable SMBus Release on PERST# in MAC */ + mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7); + mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST; + E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg); + +release: + hw->phy.ops.release(hw); + if (force) { + hw->phy.ops.reset(hw); + msec_delay(50); + } +out: + if (ret_val) { + DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val); + } else + hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off; + + return ret_val; +} + +/** * e1000_check_for_copper_link_ich8lan - Check for link (Copper) * @hw: pointer to the HW structure * @@ -1103,13 +1473,13 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) if (!mac->get_link_status) return E1000_SUCCESS; - /* First we want to see if the MII Status Register reports - * link. If so, then we want to get the current speed/duplex - * of the PHY. - */ - ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); - if (ret_val) - return ret_val; + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; if (hw->mac.type == e1000_pchlan) { ret_val = e1000_k1_gig_workaround_hv(hw, link); @@ -1117,14 +1487,17 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) return ret_val; } - /* When connected at 10Mbps half-duplex, 82579 parts are excessively + /* When connected at 10Mbps half-duplex, some parts are excessively * aggressive resulting in many collisions. To avoid this, increase * the IPG and reduce Rx latency in the PHY. 
*/ - if ((hw->mac.type == e1000_pch2lan) && link) { + if (((hw->mac.type == e1000_pch2lan) || + (hw->mac.type == e1000_pch_lpt)) && link) { u32 reg; reg = E1000_READ_REG(hw, E1000_STATUS); if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) { + u16 emi_addr; + reg = E1000_READ_REG(hw, E1000_TIPG); reg &= ~E1000_TIPG_IPGT_MASK; reg |= 0xFF; @@ -1135,7 +1508,11 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) if (ret_val) return ret_val; - ret_val = e1000_write_emi_reg_locked(hw, I82579_RX_CONFIG, 0); + if (hw->mac.type == e1000_pch2lan) + emi_addr = I82579_RX_CONFIG; + else + emi_addr = I217_RX_CONFIG; + ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0); hw->phy.ops.release(hw); @@ -1146,15 +1523,17 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) /* Work-around I218 hang issue */ if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || - (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) { + (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) || + (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) || + (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) { ret_val = e1000_k1_workaround_lpt_lp(hw, link); if (ret_val) return ret_val; } - if (hw->mac.type == e1000_pch_lpt) { - /* Set platform power management values for Latency Tolerance - * Reporting (LTR) and Optimized Buffer Flush/Fill (OBFF). + /* Set platform power management values for + * Latency Tolerance Reporting (LTR) + * Optimized Buffer Flush/Fill (OBFF) */ ret_val = e1000_platform_pm_pch_lpt(hw, link); if (ret_val) @@ -1206,9 +1585,11 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) e1000_check_downshift_generic(hw); /* Enable/Disable EEE after link up */ - ret_val = e1000_set_eee_pchlan(hw); - if (ret_val) - return ret_val; + if (hw->phy.type > e1000_phy_82579) { + ret_val = e1000_set_eee_pchlan(hw); + if (ret_val) + return ret_val; + } /* If we are forcing speed/duplex, then we simply return since * we have already determined whether we have link or not. @@ -1432,7 +1813,7 @@ static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw) * contain the MAC address but RAR[1-6] are reserved for manageability (ME). * Use SHRA[0-3] in place of those reserved for ME. **/ -static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index) +static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index) { u32 rar_low, rar_high; @@ -1456,10 +1837,13 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index) E1000_WRITE_FLUSH(hw); E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); E1000_WRITE_FLUSH(hw); - return; + return E1000_SUCCESS; } - if (index < hw->mac.rar_entry_count) { + /* RAR[1-6] are owned by manageability. Skip those and program the + * next address into the SHRA register array. + */ + if (index < (u32) (hw->mac.rar_entry_count)) { s32 ret_val; ret_val = e1000_acquire_swflag_ich8lan(hw); @@ -1476,7 +1860,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index) /* verify the register updates */ if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) && (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high)) - return; + return E1000_SUCCESS; DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n", (index - 1), E1000_READ_REG(hw, E1000_FWSM)); @@ -1484,6 +1868,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index) out: DEBUGOUT1("Failed to write receive address at index %d\n", index); + return -E1000_ERR_CONFIG; } /** @@ -1497,7 +1882,7 @@ out: * contain the MAC address. 
SHRA[0-10] are the shared receive address * registers that are shared between the Host and manageability engine (ME). **/ -static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index) +static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index) { u32 rar_low, rar_high; u32 wlock_mac; @@ -1521,7 +1906,7 @@ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index) E1000_WRITE_FLUSH(hw); E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); E1000_WRITE_FLUSH(hw); - return; + return E1000_SUCCESS; } /* The manageability engine (ME) can lock certain SHRAR registers that @@ -1556,12 +1941,13 @@ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index) /* verify the register updates */ if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) && (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high)) - return; + return E1000_SUCCESS; } } out: DEBUGOUT1("Failed to write receive address at index %d\n", index); + return -E1000_ERR_CONFIG; } /** @@ -1619,13 +2005,21 @@ release: static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) { u32 fwsm; + bool blocked = FALSE; + int i = 0; DEBUGFUNC("e1000_check_reset_block_ich8lan"); - fwsm = E1000_READ_REG(hw, E1000_FWSM); - - return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS - : E1000_BLK_PHY_RESET; + do { + fwsm = E1000_READ_REG(hw, E1000_FWSM); + if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) { + blocked = TRUE; + msec_delay(10); + continue; + } + blocked = FALSE; + } while (blocked && (i++ < 10)); + return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS; } /** @@ -1825,9 +2219,9 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) if (ret_val) goto release; - status_reg &= BM_CS_STATUS_LINK_UP | - BM_CS_STATUS_RESOLVED | - BM_CS_STATUS_SPEED_MASK; + status_reg &= (BM_CS_STATUS_LINK_UP | + BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_MASK); if (status_reg == (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | @@ -1841,9 +2235,9 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) if (ret_val) goto release; - status_reg &= HV_M_STATUS_LINK_UP | - HV_M_STATUS_AUTONEG_COMPLETE | - HV_M_STATUS_SPEED_MASK; + status_reg &= (HV_M_STATUS_LINK_UP | + HV_M_STATUS_AUTONEG_COMPLETE | + HV_M_STATUS_SPEED_MASK); if (status_reg == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE | @@ -2125,8 +2519,8 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw) if (ret_val) goto release; - /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */ - for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) { + /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */ + for (i = 0; i < (hw->mac.rar_entry_count); i++) { mac_reg = E1000_READ_REG(hw, E1000_RAL(i)); hw->phy.ops.write_reg_page(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF)); @@ -2191,10 +2585,10 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) return ret_val; if (enable) { - /* Write Rx addresses (rar_entry_count for RAL/H, +4 for + /* Write Rx addresses (rar_entry_count for RAL/H, and * SHRAL/H) and initial CRC values to the MAC */ - for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) { + for (i = 0; i < hw->mac.rar_entry_count; i++) { u8 mac_addr[ETH_ADDR_LEN] = {0}; u32 addr_high, addr_low; @@ -2263,7 +2657,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) return ret_val; hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data); data &= ~(0x3FF << 2); - data |= (0x1A << 2); + data |= (E1000_TX_PTR_GAP << 2); ret_val = hw->phy.ops.write_reg(hw, 
PHY_REG(776, 20), data); if (ret_val) return ret_val; @@ -2377,55 +2771,47 @@ release: * e1000_k1_gig_workaround_lv - K1 Si workaround * @hw: pointer to the HW structure * - * Workaround to set the K1 beacon duration for 82579 parts + * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps + * Disable K1 for 1000 and 100 speeds **/ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; u16 status_reg = 0; - u32 mac_reg; - u16 phy_reg; DEBUGFUNC("e1000_k1_workaround_lv"); if (hw->mac.type != e1000_pch2lan) return E1000_SUCCESS; - /* Set K1 beacon duration based on 1Gbps speed or otherwise */ + /* Set K1 beacon duration based on 10Mbs speed */ ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg); if (ret_val) return ret_val; if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { - mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4); - mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; - - ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg); - if (ret_val) - return ret_val; - - if (status_reg & HV_M_STATUS_SPEED_1000) { + if (status_reg & + (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) { u16 pm_phy_reg; - mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; - phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; - /* LV 1G Packet drop issue wa */ + /* LV 1G/100 Packet drop issue wa */ ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL, &pm_phy_reg); if (ret_val) return ret_val; - pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA; + pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE; ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, pm_phy_reg); if (ret_val) return ret_val; } else { + u32 mac_reg; + mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4); + mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; - phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; + E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg); } - E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg); - ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg); } return ret_val; @@ -2962,7 +3348,6 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) /* Clear FCERR and DAEL in hw status by writing 1 */ hsfsts.hsf_status.flcerr = 1; hsfsts.hsf_status.dael = 1; - E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval); /* Either we should have a hardware SPI cycle in progress @@ -3029,6 +3414,7 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); hsflctl.hsf_ctrl.flcgo = 1; + E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); /* wait till FDONE bit is set to 1 */ @@ -3083,6 +3469,7 @@ static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, u16 word = 0; ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); + if (ret_val) return ret_val; @@ -3112,11 +3499,10 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, DEBUGFUNC("e1000_read_flash_data_ich8lan"); - if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) return -E1000_ERR_NVM; - - flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) + - hw->nvm.flash_base_addr; + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); do { usec_delay(1); @@ -3124,13 +3510,12 @@ static s32 e1000_read_flash_data_ich8lan(struct 
e1000_hw *hw, u32 offset, ret_val = e1000_flash_cycle_init_ich8lan(hw); if (ret_val != E1000_SUCCESS) break; - hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); + /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ hsflctl.hsf_ctrl.fldbcount = size - 1; hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); - E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr); ret_val = e1000_flash_cycle_ich8lan(hw, @@ -3169,6 +3554,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, return ret_val; } + /** * e1000_write_nvm_ich8lan - Write word(s) to the NVM * @hw: pointer to the HW structure @@ -3222,7 +3608,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 i, act_offset, new_bank_offset, old_bank_offset, bank; s32 ret_val; - u16 data; + u16 data = 0; DEBUGFUNC("e1000_update_nvm_checksum_ich8lan"); @@ -3258,12 +3644,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) if (ret_val) goto release; } - for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { - /* Determine whether to write the value stored - * in the other NVM bank or a modified value stored - * in the shadow RAM - */ if (dev_spec->shadow_ram[i].modified) { data = dev_spec->shadow_ram[i].value; } else { @@ -3273,7 +3654,6 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) if (ret_val) break; } - /* If the word is 0x13, then make sure the signature bits * (15:14) are 11b until the commit has completed. * This will allow us to write 10b which indicates the @@ -3288,6 +3668,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) act_offset = (i + new_bank_offset) << 1; usec_delay(100); + /* Write the bytes to the new bank. */ ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, @@ -3301,7 +3682,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) (u8)(data >> 8)); if (ret_val) break; - } + } /* Don't bother writing the segment valid bits if sector * programming failed. @@ -3322,8 +3703,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) goto release; data &= 0xBFFF; - ret_val = e1000_retry_write_flash_byte_ich8lan(hw, - act_offset * 2 + 1, + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1, (u8)(data >> 8)); if (ret_val) goto release; @@ -3334,7 +3714,9 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) * to 1's. We can write 1's to 0's without an erase */ act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); + if (ret_val) goto release; @@ -3433,12 +3815,11 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, DEBUGFUNC("e1000_write_ich8_data"); - if (size < 1 || size > 2 || data > size * 0xff || - offset > ICH_FLASH_LINEAR_ADDR_MASK) + if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) return -E1000_ERR_NVM; - flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) + - hw->nvm.flash_base_addr; + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); do { usec_delay(1); @@ -3446,8 +3827,8 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, ret_val = e1000_flash_cycle_init_ich8lan(hw); if (ret_val != E1000_SUCCESS) break; - hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); + /* 0b/1b corresponds to 1 or 2 byte size, respectively. 
*/ hsflctl.hsf_ctrl.fldbcount = size - 1; hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; @@ -3465,8 +3846,9 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, /* check if FCERR is set to 1 , if set to 1, clear it * and try the whole sequence a few more times else done */ - ret_val = e1000_flash_cycle_ich8lan(hw, - ICH_FLASH_WRITE_COMMAND_TIMEOUT); + ret_val = + e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_WRITE_COMMAND_TIMEOUT); if (ret_val == E1000_SUCCESS) break; @@ -3488,6 +3870,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, return ret_val; } + /** * e1000_write_flash_byte_ich8lan - Write a single byte to NVM * @hw: pointer to the HW structure @@ -3506,6 +3889,8 @@ static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, return e1000_write_flash_data_ich8lan(hw, offset, 1, word); } + + /** * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM * @hw: pointer to the HW structure @@ -3602,8 +3987,10 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) flash_linear_addr = hw->nvm.flash_base_addr; flash_linear_addr += (bank) ? flash_bank_size : 0; - for (j = 0; j < iteration ; j++) { + for (j = 0; j < iteration; j++) { do { + u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT; + /* Steps */ ret_val = e1000_flash_cycle_init_ich8lan(hw); if (ret_val) @@ -3612,8 +3999,9 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) /* Write a value 11 (block Erase) in Flash * Cycle field in hw flash control */ - hsflctl.regval = E1000_READ_FLASH_REG16(hw, - ICH_FLASH_HSFCTL); + hsflctl.regval = + E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); @@ -3626,8 +4014,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr); - ret_val = e1000_flash_cycle_ich8lan(hw, - ICH_FLASH_ERASE_COMMAND_TIMEOUT); + ret_val = e1000_flash_cycle_ich8lan(hw, timeout); if (ret_val == E1000_SUCCESS) break; @@ -3947,16 +4334,16 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) /* Set the transmit descriptor write-back policy for both queues */ txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0)); - txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | - E1000_TXDCTL_FULL_TX_DESC_WB; - txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | - E1000_TXDCTL_MAX_TX_DESC_PREFETCH; + txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB); + txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | + E1000_TXDCTL_MAX_TX_DESC_PREFETCH); E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl); txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1)); - txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | - E1000_TXDCTL_FULL_TX_DESC_WB; - txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | - E1000_TXDCTL_MAX_TX_DESC_PREFETCH; + txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB); + txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | + E1000_TXDCTL_MAX_TX_DESC_PREFETCH); E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl); /* ICH8 has opposite polarity of no_snoop bits. @@ -4041,6 +4428,7 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) */ reg = E1000_READ_REG(hw, E1000_RFCTL); reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); + /* Disable IPv6 extension header parsing because some malformed * IPv6 headers can hang the Rx. 
*/ @@ -4479,7 +4867,9 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) u16 phy_reg, device_id = hw->device_id; if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || - (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) { + (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) || + (device_id == E1000_DEV_ID_PCH_I218_LM3) || + (device_id == E1000_DEV_ID_PCH_I218_V3)) { u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6); E1000_WRITE_REG(hw, E1000_FEXTNVM6, @@ -4502,14 +4892,25 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) /* Disable LPLU if both link partners support 100BaseT * EEE and 100Full is advertised on both ends of the - * link. + * link, and enable Auto Enable LPI since there will + * be no driver to enable LPI while in Sx. */ if ((eee_advert & I82579_EEE_100_SUPPORTED) && (dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) && - (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) + (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) { phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_NOND0A_LPLU); + + /* Set Auto Enable LPI after link up */ + hw->phy.ops.read_reg_locked(hw, + I217_LPI_GPIO_CTRL, + &phy_reg); + phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI; + hw->phy.ops.write_reg_locked(hw, + I217_LPI_GPIO_CTRL, + phy_reg); + } } /* For i217 Intel Rapid Start Technology support, @@ -4520,7 +4921,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) * The SMBus release must also be disabled on LCD reset. */ if (!(E1000_READ_REG(hw, E1000_FWSM) & - E1000_ICH_FWSM_FW_VALID)) { + E1000_ICH_FWSM_FW_VALID)) { /* Enable proxy to reset only on power good. */ hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL, &phy_reg); @@ -4613,6 +5014,11 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) return; } + /* Clear Auto Enable LPI after link up */ + hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg); + phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI; + hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg); + if (!(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID)) { /* Restore clear on SMB if no manageability engine diff --git a/usr/src/uts/common/io/e1000api/e1000_ich8lan.h b/usr/src/uts/common/io/e1000api/e1000_ich8lan.h index ceeca2e044..999e856be3 100644 --- a/usr/src/uts/common/io/e1000api/e1000_ich8lan.h +++ b/usr/src/uts/common/io/e1000api/e1000_ich8lan.h @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2014, Intel Corporation All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -35,10 +35,6 @@ #ifndef _E1000_ICH8LAN_H_ #define _E1000_ICH8LAN_H_ -#ifdef __cplusplus -extern "C" { -#endif - #define ICH_FLASH_GFPREG 0x0000 #define ICH_FLASH_HSFSTS 0x0004 #define ICH_FLASH_HSFCTL 0x0006 @@ -64,21 +60,26 @@ extern "C" { #define ICH_FLASH_SEG_SIZE_8K 8192 #define ICH_FLASH_SEG_SIZE_64K 65536 -#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */ +#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */ /* FW established a valid mode */ -#define E1000_ICH_FWSM_FW_VALID 0x00008000 -#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */ +#define E1000_ICH_FWSM_FW_VALID 0x00008000 +#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */ #define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000 #define E1000_ICH_MNG_IAMT_MODE 0x2 #define E1000_FWSM_WLOCK_MAC_MASK 0x0380 #define E1000_FWSM_WLOCK_MAC_SHIFT 7 +#define E1000_FWSM_ULP_CFG_DONE 0x00000400 /* Low power cfg done */ /* Shared Receive Address Registers */ #define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8)) #define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8)) +#define E1000_H2ME 0x05B50 /* Host to ME */ +#define E1000_H2ME_ULP 0x00000800 /* ULP Indication Bit */ +#define E1000_H2ME_ENFORCE_SETTINGS 0x00001000 /* Enforce Settings */ + #define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \ (ID_LED_OFF1_OFF2 << 8) | \ (ID_LED_OFF1_ON2 << 4) | \ @@ -91,8 +92,11 @@ extern "C" { #define E1000_ICH8_LAN_INIT_TIMEOUT 1500 +/* FEXT register bit definition */ +#define E1000_FEXT_PHY_CABLE_DISCONNECTED 0x00000004 + #define E1000_FEXTNVM_SW_CONFIG 1 -#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M */ +#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* different on ICH8M */ #define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000 #define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000 @@ -102,6 +106,9 @@ extern "C" { #define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 #define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100 +#define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200 + +#define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020 #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL @@ -112,8 +119,8 @@ extern "C" { #define PHY_PAGE_SHIFT 5 #define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ ((reg) & MAX_PHY_REG_ADDRESS)) -#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */ -#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */ +#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */ +#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */ #define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 #define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300 @@ -144,19 +151,20 @@ extern "C" { #define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400 #define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004 #define HV_STATS_PAGE 778 -#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision Count */ +/* Half-duplex collision counts */ +#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision */ #define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17) -#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. Count */ +#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. */ #define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19) -#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Coll. 
Count */ +#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Collision */ #define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21) -#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision Count */ +#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision */ #define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24) -#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision Count */ +#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision */ #define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26) #define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */ #define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28) -#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Transmit with no CRS */ +#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Tx with no CRS */ #define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30) #define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */ @@ -168,6 +176,16 @@ extern "C" { #define CV_SMB_CTRL PHY_REG(769, 23) #define CV_SMB_CTRL_FORCE_SMBUS 0x0001 +/* I218 Ultra Low Power Configuration 1 Register */ +#define I218_ULP_CONFIG1 PHY_REG(779, 16) +#define I218_ULP_CONFIG1_START 0x0001 /* Start auto ULP config */ +#define I218_ULP_CONFIG1_IND 0x0004 /* Pwr up from ULP indication */ +#define I218_ULP_CONFIG1_STICKY_ULP 0x0010 /* Set sticky ULP mode */ +#define I218_ULP_CONFIG1_INBAND_EXIT 0x0020 /* Inband on ULP exit */ +#define I218_ULP_CONFIG1_WOL_HOST 0x0040 /* WoL Host on ULP exit */ +#define I218_ULP_CONFIG1_RESET_TO_SMBUS 0x0100 /* Reset to SMBus mode */ +#define I218_ULP_CONFIG1_DISABLE_SMB_PERST 0x1000 /* Disable on PERST# */ + /* SMBus Address Phy Register */ #define HV_SMB_ADDR PHY_REG(768, 26) #define HV_SMB_ADDR_MASK 0x007F @@ -202,15 +220,28 @@ extern "C" { /* PHY Power Management Control */ #define HV_PM_CTRL PHY_REG(770, 17) #define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100 +#define HV_PM_CTRL_K1_ENABLE 0x4000 #define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */ +/* Inband Control */ +#define I217_INBAND_CTRL PHY_REG(770, 18) +#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK 0x3F00 +#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT 8 + +/* Low Power Idle GPIO Control */ +#define I217_LPI_GPIO_CTRL PHY_REG(772, 18) +#define I217_LPI_GPIO_CTRL_AUTO_EN_LPI 0x0800 + /* PHY Low Power Idle Control */ #define I82579_LPI_CTRL PHY_REG(772, 20) #define I82579_LPI_CTRL_100_ENABLE 0x2000 #define I82579_LPI_CTRL_1000_ENABLE 0x4000 #define I82579_LPI_CTRL_ENABLE_MASK 0x6000 -#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80 + +/* 82579 DFT Control */ +#define I82579_DFT_CTRL PHY_REG(769, 20) +#define I82579_DFT_CTRL_GATE_PHY_RESET 0x0040 /* Gate PHY Reset on MAC Reset */ /* Extended Management Interface (EMI) Registers */ #define I82579_EMI_ADDR 0x10 @@ -220,16 +251,19 @@ extern "C" { #define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */ #define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */ #define I82579_RX_CONFIG 0x3412 /* Receive configuration */ -#define I82579_EEE_PCS_STATUS 0x182D /* IEEE MMD Register 3.1 >> 8 */ +#define I82579_LPI_PLL_SHUT 0x4412 /* LPI PLL Shut Enable */ +#define I82579_EEE_PCS_STATUS 0x182E /* IEEE MMD Register 3.1 >> 8 */ #define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */ #define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */ #define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */ -#define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE supported */ -#define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE 
supported */ +#define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE */ +#define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE */ +#define I82579_LPI_100_PLL_SHUT (1 << 2) /* 100M LPI PLL Shut Enabled */ #define I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */ #define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */ #define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */ #define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */ +#define I217_RX_CONFIG 0xB20C /* Receive configuration */ #define E1000_EEE_RX_LPI_RCVD 0x0400 /* Tx LP idle received */ #define E1000_EEE_TX_LPI_RCVD 0x0800 /* Rx LP idle received */ @@ -278,9 +312,8 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable); void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw); s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable); s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data); - -#ifdef __cplusplus -} -#endif - -#endif /* _E1000_ICH8LAN_H_ */ +s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data); +s32 e1000_set_eee_pchlan(struct e1000_hw *hw); +s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx); +s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force); +#endif /* _E1000_ICH8LAN_H_ */ diff --git a/usr/src/uts/common/io/e1000api/e1000_mac.c b/usr/src/uts/common/io/e1000api/e1000_mac.c index 9e6b30c49f..b888b341c4 100644 --- a/usr/src/uts/common/io/e1000api/e1000_mac.c +++ b/usr/src/uts/common/io/e1000api/e1000_mac.c @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2014, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -37,7 +37,7 @@ static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw); static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw); static void e1000_config_collision_dist_generic(struct e1000_hw *hw); -static void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index); +static int e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index); /** * e1000_init_mac_ops_generic - Initialize MAC function pointers @@ -85,7 +85,7 @@ void e1000_init_mac_ops_generic(struct e1000_hw *hw) * e1000_null_ops_generic - No-op function, returns 0 * @hw: pointer to the HW structure **/ -s32 e1000_null_ops_generic(struct e1000_hw *hw) +s32 e1000_null_ops_generic(struct e1000_hw E1000_UNUSEDARG *hw) { DEBUGFUNC("e1000_null_ops_generic"); return E1000_SUCCESS; @@ -95,7 +95,7 @@ s32 e1000_null_ops_generic(struct e1000_hw *hw) * e1000_null_mac_generic - No-op function, return void * @hw: pointer to the HW structure **/ -void e1000_null_mac_generic(struct e1000_hw *hw) +void e1000_null_mac_generic(struct e1000_hw E1000_UNUSEDARG *hw) { DEBUGFUNC("e1000_null_mac_generic"); return; @@ -105,7 +105,8 @@ void e1000_null_mac_generic(struct e1000_hw *hw) * e1000_null_link_info - No-op function, return 0 * @hw: pointer to the HW structure **/ -s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d) +s32 e1000_null_link_info(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG *s, u16 E1000_UNUSEDARG *d) { DEBUGFUNC("e1000_null_link_info"); return E1000_SUCCESS; @@ -115,7 +116,8 @@ s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d) * e1000_null_mng_mode - No-op function, return FALSE * @hw: pointer to the HW structure **/ -bool e1000_null_mng_mode(struct e1000_hw *hw) { +bool 
e1000_null_mng_mode(struct e1000_hw E1000_UNUSEDARG *hw) +{ DEBUGFUNC("e1000_null_mng_mode"); return FALSE; } @@ -124,7 +126,8 @@ bool e1000_null_mng_mode(struct e1000_hw *hw) { * e1000_null_update_mc - No-op function, return void * @hw: pointer to the HW structure **/ -void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a) +void e1000_null_update_mc(struct e1000_hw E1000_UNUSEDARG *hw, + u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a) { DEBUGFUNC("e1000_null_update_mc"); return; @@ -134,27 +137,30 @@ void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a) * e1000_null_write_vfta - No-op function, return void * @hw: pointer to the HW structure **/ -void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b) +void e1000_null_write_vfta(struct e1000_hw E1000_UNUSEDARG *hw, + u32 E1000_UNUSEDARG a, u32 E1000_UNUSEDARG b) { DEBUGFUNC("e1000_null_write_vfta"); return; } /** - * e1000_null_rar_set - No-op function, return void + * e1000_null_rar_set - No-op function, return 0 * @hw: pointer to the HW structure **/ -void e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a) +int e1000_null_rar_set(struct e1000_hw E1000_UNUSEDARG *hw, + u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a) { DEBUGFUNC("e1000_null_rar_set"); - return; + return E1000_SUCCESS; } /** * e1000_null_set_obff_timer - No-op function, return 0 * @hw: pointer to the HW structure **/ -s32 e1000_null_set_obff_timer(struct e1000_hw *hw, u32 a) +s32 e1000_null_set_obff_timer(struct e1000_hw E1000_UNUSEDARG *hw, + u32 E1000_UNUSEDARG a) { DEBUGFUNC("e1000_null_set_obff_timer"); return E1000_SUCCESS; @@ -469,7 +475,7 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) * Sets the receive address array register at index to the address passed * in by addr. **/ -static void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index) +static int e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index) { u32 rar_low, rar_high; @@ -495,6 +501,8 @@ static void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index) E1000_WRITE_FLUSH(hw); E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); E1000_WRITE_FLUSH(hw); + + return E1000_SUCCESS; } /** @@ -940,6 +948,7 @@ s32 e1000_set_default_fc_generic(struct e1000_hw *hw) { s32 ret_val; u16 nvm_data; + u16 nvm_offset = 0; DEBUGFUNC("e1000_set_default_fc_generic"); @@ -951,7 +960,18 @@ s32 e1000_set_default_fc_generic(struct e1000_hw *hw) * control setting, then the variable hw->fc will * be initialized based on a value in the EEPROM. */ - ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); + if (hw->mac.type == e1000_i350) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func); + ret_val = hw->nvm.ops.read(hw, + NVM_INIT_CONTROL2_REG + + nvm_offset, + 1, &nvm_data); + } else { + ret_val = hw->nvm.ops.read(hw, + NVM_INIT_CONTROL2_REG, + 1, &nvm_data); + } + if (ret_val) { DEBUGOUT("NVM Read Error\n"); @@ -1675,7 +1695,7 @@ s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed, * Sets the speed and duplex to gigabit full duplex (the only possible option) * for fiber/serdes links. 
**/ -s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw, +s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw E1000_UNUSEDARG *hw, u16 *speed, u16 *duplex) { DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic"); @@ -2078,7 +2098,8 @@ s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw) while (timeout) { if (!(E1000_READ_REG(hw, E1000_STATUS) & - E1000_STATUS_GIO_MASTER_ENABLE)) + E1000_STATUS_GIO_MASTER_ENABLE) || + E1000_REMOVED(hw->hw_addr)) break; usec_delay(100); timeout--; @@ -2187,7 +2208,7 @@ static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw) * Validate the MDI/MDIx setting, allowing for auto-crossover during forced * operation. **/ -s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw *hw) +s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw E1000_UNUSEDARG *hw) { DEBUGFUNC("e1000_validate_mdi_setting_crossover_generic"); diff --git a/usr/src/uts/common/io/e1000api/e1000_mac.h b/usr/src/uts/common/io/e1000api/e1000_mac.h index b855030824..2c1bfe3243 100644 --- a/usr/src/uts/common/io/e1000api/e1000_mac.h +++ b/usr/src/uts/common/io/e1000api/e1000_mac.h @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2014, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -35,18 +35,17 @@ #ifndef _E1000_MAC_H_ #define _E1000_MAC_H_ -#ifdef __cplusplus -extern "C" { -#endif - void e1000_init_mac_ops_generic(struct e1000_hw *hw); +#ifndef E1000_REMOVED +#define E1000_REMOVED(a) (0) +#endif /* E1000_REMOVED */ void e1000_null_mac_generic(struct e1000_hw *hw); s32 e1000_null_ops_generic(struct e1000_hw *hw); s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d); bool e1000_null_mng_mode(struct e1000_hw *hw); void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a); void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b); -void e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a); +int e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a); s32 e1000_null_set_obff_timer(struct e1000_hw *hw, u32 a); s32 e1000_blink_led_generic(struct e1000_hw *hw); s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw); @@ -95,8 +94,4 @@ void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop); void e1000_update_adaptive_generic(struct e1000_hw *hw); void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); -#ifdef __cplusplus -} #endif - -#endif /* _E1000_MAC_H_ */ diff --git a/usr/src/uts/common/io/e1000api/e1000_manage.c b/usr/src/uts/common/io/e1000api/e1000_manage.c index c58d32d16b..8087e656c5 100644 --- a/usr/src/uts/common/io/e1000api/e1000_manage.c +++ b/usr/src/uts/common/io/e1000api/e1000_manage.c @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2014, Intel Corporation All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -364,9 +364,12 @@ bool e1000_enable_mng_pass_thru(struct e1000_hw *hw) } else if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) { u16 data; + s32 ret_val; factps = E1000_READ_REG(hw, E1000_FACTPS); - e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data); + ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data); + if (ret_val) + return FALSE; if (!(factps & E1000_FACTPS_MNGCG) && ((data & E1000_NVM_INIT_CTRL2_MNGM) == @@ -374,7 +377,7 @@ bool e1000_enable_mng_pass_thru(struct e1000_hw *hw) return TRUE; } else if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN)) { - return TRUE; + return TRUE; } return FALSE; diff --git a/usr/src/uts/common/io/e1000api/e1000_manage.h b/usr/src/uts/common/io/e1000api/e1000_manage.h index 68bf3a4b16..51f176719c 100644 --- a/usr/src/uts/common/io/e1000api/e1000_manage.h +++ b/usr/src/uts/common/io/e1000api/e1000_manage.h @@ -35,10 +35,6 @@ #ifndef _E1000_MANAGE_H_ #define _E1000_MANAGE_H_ -#ifdef __cplusplus -extern "C" { -#endif - bool e1000_check_mng_mode_generic(struct e1000_hw *hw); bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw); s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw); @@ -97,8 +93,4 @@ enum e1000_mng_mode { /* Intel(R) Active Management Technology signature */ #define E1000_IAMT_SIGNATURE 0x544D4149 -#ifdef __cplusplus -} #endif - -#endif /* _E1000_MANAGE_H_ */ diff --git a/usr/src/uts/common/io/e1000api/e1000_mbx.c b/usr/src/uts/common/io/e1000api/e1000_mbx.c index 14af886738..55477b2a6a 100644 --- a/usr/src/uts/common/io/e1000api/e1000_mbx.c +++ b/usr/src/uts/common/io/e1000api/e1000_mbx.c @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2010, Intel Corporation + Copyright (c) 2001-2014, Intel Corporation All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -38,7 +38,8 @@ * e1000_null_mbx_check_for_flag - No-op function, return 0 * @hw: pointer to the HW structure **/ -static s32 e1000_null_mbx_check_for_flag(struct e1000_hw *hw, u16 mbx_id) +static s32 e1000_null_mbx_check_for_flag(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG mbx_id) { DEBUGFUNC("e1000_null_mbx_check_flag"); @@ -49,8 +50,10 @@ static s32 e1000_null_mbx_check_for_flag(struct e1000_hw *hw, u16 mbx_id) * e1000_null_mbx_transact - No-op function, return 0 * @hw: pointer to the HW structure **/ -static s32 e1000_null_mbx_transact(struct e1000_hw *hw, u32 *msg, u16 size, - u16 mbx_id) +static s32 e1000_null_mbx_transact(struct e1000_hw E1000_UNUSEDARG *hw, + u32 E1000_UNUSEDARG *msg, + u16 E1000_UNUSEDARG size, + u16 E1000_UNUSEDARG mbx_id) { DEBUGFUNC("e1000_null_mbx_rw_msg"); @@ -354,7 +357,8 @@ static s32 e1000_check_for_bit_vf(struct e1000_hw *hw, u32 mask) * * returns SUCCESS if the PF has set the Status bit or else ERR_MBX **/ -static s32 e1000_check_for_msg_vf(struct e1000_hw *hw, u16 mbx_id) +static s32 e1000_check_for_msg_vf(struct e1000_hw *hw, + u16 E1000_UNUSEDARG mbx_id) { s32 ret_val = -E1000_ERR_MBX; @@ -375,7 +379,8 @@ static s32 e1000_check_for_msg_vf(struct e1000_hw *hw, u16 mbx_id) * * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX **/ -static s32 e1000_check_for_ack_vf(struct e1000_hw *hw, u16 mbx_id) +static s32 e1000_check_for_ack_vf(struct e1000_hw *hw, + u16 E1000_UNUSEDARG mbx_id) { s32 ret_val = -E1000_ERR_MBX; @@ -396,14 +401,15 @@ static s32 e1000_check_for_ack_vf(struct e1000_hw *hw, u16 mbx_id) * * returns TRUE if the PF has set the reset done bit or else FALSE **/ -static s32 e1000_check_for_rst_vf(struct e1000_hw *hw, u16 mbx_id) +static s32 e1000_check_for_rst_vf(struct e1000_hw *hw, + u16 E1000_UNUSEDARG mbx_id) { s32 ret_val = -E1000_ERR_MBX; DEBUGFUNC("e1000_check_for_rst_vf"); if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD | - E1000_V2PMAILBOX_RSTI))) { + E1000_V2PMAILBOX_RSTI))) { ret_val = E1000_SUCCESS; hw->mbx.stats.rsts++; } @@ -443,7 +449,7 @@ static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw) * returns SUCCESS if it successfully copied message into the buffer **/ static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size, - u16 mbx_id) + u16 E1000_UNUSEDARG mbx_id) { s32 ret_val; u16 i; @@ -484,7 +490,7 @@ out_no_write: * returns SUCCESS if it successfuly read message from buffer **/ static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size, - u16 mbx_id) + u16 E1000_UNUSEDARG mbx_id) { s32 ret_val = E1000_SUCCESS; u16 i; @@ -657,7 +663,7 @@ static s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) * returns SUCCESS if it successfully copied message into the buffer **/ static s32 e1000_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, - u16 vf_number) + u16 vf_number) { s32 ret_val; u16 i; @@ -700,7 +706,7 @@ out_no_write: * a message due to a VF request so no polling for message is needed. 
**/ static s32 e1000_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, - u16 vf_number) + u16 vf_number) { s32 ret_val; u16 i; @@ -739,6 +745,7 @@ s32 e1000_init_mbx_params_pf(struct e1000_hw *hw) switch (hw->mac.type) { case e1000_82576: case e1000_i350: + case e1000_i354: mbx->timeout = 0; mbx->usec_delay = 0; diff --git a/usr/src/uts/common/io/e1000api/e1000_mbx.h b/usr/src/uts/common/io/e1000api/e1000_mbx.h index c66ee7a78b..d2aea5c497 100644 --- a/usr/src/uts/common/io/e1000api/e1000_mbx.h +++ b/usr/src/uts/common/io/e1000api/e1000_mbx.h @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2010, Intel Corporation + Copyright (c) 2001-2014, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -35,66 +35,62 @@ #ifndef _E1000_MBX_H_ #define _E1000_MBX_H_ -#ifdef __cplusplus -extern "C" { -#endif - #include "e1000_api.h" /* Define mailbox register bits */ -#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ -#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */ -#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ -#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ -#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ -#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ -#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */ -#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ +#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ +#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */ +#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ +#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ +#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */ +#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ #define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ -#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */ -#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ -#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ -#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ -#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ +#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ #define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */ -#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ #define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */ -#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ +#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ -#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ +#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ /* 
If it's a E1000_VF_* msg then it originates in the VF and is sent to the * PF. The reverse is TRUE if it is E1000_PF_*. * Message ACK's are the value or'd with 0xF0000000 */ -#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with - * this are the ACK */ -#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with - * this are the NACK */ -#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still - clear to send requests */ -#define E1000_VT_MSGINFO_SHIFT 16 -/* bits 23:16 are used for exra info for certain messages */ -#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) - -#define E1000_VF_RESET 0x01 /* VF requests reset */ -#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ -#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ +/* Msgs below or'd with this are the ACK */ +#define E1000_VT_MSGTYPE_ACK 0x80000000 +/* Msgs below or'd with this are the NACK */ +#define E1000_VT_MSGTYPE_NACK 0x40000000 +/* Indicates that VF is still clear to send requests */ +#define E1000_VT_MSGTYPE_CTS 0x20000000 +#define E1000_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for extra info for certain messages */ +#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) + +#define E1000_VF_RESET 0x01 /* VF requests reset */ +#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ +#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ #define E1000_VF_SET_MULTICAST_COUNT_MASK (0x1F << E1000_VT_MSGINFO_SHIFT) -#define E1000_VF_SET_MULTICAST_OVERFLOW (0x80 << E1000_VT_MSGINFO_SHIFT) -#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ -#define E1000_VF_SET_VLAN_ADD (0x01 << E1000_VT_MSGINFO_SHIFT) -#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */ -#define E1000_VF_SET_PROMISC 0x06 /*VF requests to clear VMOLR.ROPE/MPME*/ -#define E1000_VF_SET_PROMISC_UNICAST (0x01 << E1000_VT_MSGINFO_SHIFT) -#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_MULTICAST_OVERFLOW (0x80 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ +#define E1000_VF_SET_VLAN_ADD (0x01 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_LPE 0x05 /* reqs to set VMOLR.LPE */ +#define E1000_VF_SET_PROMISC 0x06 /* reqs to clear VMOLR.ROPE/MPME*/ +#define E1000_VF_SET_PROMISC_UNICAST (0x01 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT) -#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ +#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ -#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ -#define E1000_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ +#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define E1000_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ s32 e1000_read_mbx(struct e1000_hw *, u32 *, u16, u16); s32 e1000_write_mbx(struct e1000_hw *, u32 *, u16, u16); @@ -107,8 +103,4 @@ void e1000_init_mbx_ops_generic(struct e1000_hw *hw); s32 e1000_init_mbx_params_vf(struct e1000_hw *); s32 e1000_init_mbx_params_pf(struct e1000_hw *); -#ifdef __cplusplus -} -#endif - #endif /* _E1000_MBX_H_ */ diff --git a/usr/src/uts/common/io/e1000api/e1000_nvm.c b/usr/src/uts/common/io/e1000api/e1000_nvm.c index 3acbe7debc..f702f71b3a 100644 --- a/usr/src/uts/common/io/e1000api/e1000_nvm.c +++ b/usr/src/uts/common/io/e1000api/e1000_nvm.c @@ -1,6 +1,6 @@ 
/****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2014, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -63,7 +63,9 @@ void e1000_init_nvm_ops_generic(struct e1000_hw *hw) * e1000_null_nvm_read - No-op function, return 0 * @hw: pointer to the HW structure **/ -s32 e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c) +s32 e1000_null_read_nvm(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b, + u16 E1000_UNUSEDARG *c) { DEBUGFUNC("e1000_null_read_nvm"); return E1000_SUCCESS; @@ -73,7 +75,7 @@ s32 e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c) * e1000_null_nvm_generic - No-op function, return void * @hw: pointer to the HW structure **/ -void e1000_null_nvm_generic(struct e1000_hw *hw) +void e1000_null_nvm_generic(struct e1000_hw E1000_UNUSEDARG *hw) { DEBUGFUNC("e1000_null_nvm_generic"); return; @@ -83,7 +85,8 @@ void e1000_null_nvm_generic(struct e1000_hw *hw) * e1000_null_led_default - No-op function, return 0 * @hw: pointer to the HW structure **/ -s32 e1000_null_led_default(struct e1000_hw *hw, u16 *data) +s32 e1000_null_led_default(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG *data) { DEBUGFUNC("e1000_null_led_default"); return E1000_SUCCESS; @@ -93,7 +96,9 @@ s32 e1000_null_led_default(struct e1000_hw *hw, u16 *data) * e1000_null_write_nvm - No-op function, return 0 * @hw: pointer to the HW structure **/ -s32 e1000_null_write_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c) +s32 e1000_null_write_nvm(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b, + u16 E1000_UNUSEDARG *c) { DEBUGFUNC("e1000_null_write_nvm"); return E1000_SUCCESS; @@ -577,6 +582,9 @@ s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) E1000_NVM_RW_REG_DATA); } + if (ret_val) + DEBUGOUT1("NVM read error: %d\n", ret_val); + return ret_val; } @@ -767,6 +775,12 @@ s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, DEBUGFUNC("e1000_read_pba_string_generic"); + if ((hw->mac.type >= e1000_i210) && + !e1000_get_flash_presence_i210(hw)) { + DEBUGOUT("Flashless no PBA string\n"); + return -E1000_ERR_NVM_PBA_SECTION; + } + if (pba_num == NULL) { DEBUGOUT("PBA string buffer was null\n"); return -E1000_ERR_INVALID_ARGUMENT; @@ -974,7 +988,7 @@ s32 e1000_read_pba_raw(struct e1000_hw *hw, u16 *eeprom_buf, return ret_val; } else { if (eeprom_buf_size > (u32)(pba->word[1] + - pba->pba_block[0])) { + pba_block_size)) { memcpy(pba->pba_block, &eeprom_buf[pba->word[1]], pba_block_size * sizeof(u16)); @@ -1217,3 +1231,5 @@ static void e1000_reload_nvm_generic(struct e1000_hw *hw) E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); E1000_WRITE_FLUSH(hw); } + + diff --git a/usr/src/uts/common/io/e1000api/e1000_nvm.h b/usr/src/uts/common/io/e1000api/e1000_nvm.h index 69c2ab072d..34077b2498 100644 --- a/usr/src/uts/common/io/e1000api/e1000_nvm.h +++ b/usr/src/uts/common/io/e1000api/e1000_nvm.h @@ -35,10 +35,6 @@ #ifndef _E1000_NVM_H_ #define _E1000_NVM_H_ -#ifdef __cplusplus -extern "C" { -#endif - #if !defined(NO_READ_PBA_RAW) || !defined(NO_WRITE_PBA_RAW) struct e1000_pba { u16 word[2]; @@ -83,8 +79,4 @@ void e1000_release_nvm_generic(struct e1000_hw *hw); #define E1000_STM_OPCODE 0xDB00 -#ifdef __cplusplus -} #endif - -#endif /* _E1000_NVM_H_ */ diff --git a/usr/src/uts/common/io/e1000api/e1000_phy.c 
b/usr/src/uts/common/io/e1000api/e1000_phy.c index 241c1d5d30..f27889c83c 100644 --- a/usr/src/uts/common/io/e1000api/e1000_phy.c +++ b/usr/src/uts/common/io/e1000api/e1000_phy.c @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2014, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -104,7 +104,8 @@ void e1000_init_phy_ops_generic(struct e1000_hw *hw) * e1000_null_set_page - No-op function, return 0 * @hw: pointer to the HW structure **/ -s32 e1000_null_set_page(struct e1000_hw *hw, u16 data) +s32 e1000_null_set_page(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG data) { DEBUGFUNC("e1000_null_set_page"); return E1000_SUCCESS; @@ -114,7 +115,8 @@ s32 e1000_null_set_page(struct e1000_hw *hw, u16 data) * e1000_null_read_reg - No-op function, return 0 * @hw: pointer to the HW structure **/ -s32 e1000_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data) +s32 e1000_null_read_reg(struct e1000_hw E1000_UNUSEDARG *hw, + u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG *data) { DEBUGFUNC("e1000_null_read_reg"); return E1000_SUCCESS; @@ -124,7 +126,7 @@ s32 e1000_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data) * e1000_null_phy_generic - No-op function, return void * @hw: pointer to the HW structure **/ -void e1000_null_phy_generic(struct e1000_hw *hw) +void e1000_null_phy_generic(struct e1000_hw E1000_UNUSEDARG *hw) { DEBUGFUNC("e1000_null_phy_generic"); return; @@ -134,7 +136,8 @@ void e1000_null_phy_generic(struct e1000_hw *hw) * e1000_null_lplu_state - No-op function, return 0 * @hw: pointer to the HW structure **/ -s32 e1000_null_lplu_state(struct e1000_hw *hw, bool active) +s32 e1000_null_lplu_state(struct e1000_hw E1000_UNUSEDARG *hw, + bool E1000_UNUSEDARG active) { DEBUGFUNC("e1000_null_lplu_state"); return E1000_SUCCESS; @@ -144,7 +147,8 @@ s32 e1000_null_lplu_state(struct e1000_hw *hw, bool active) * e1000_null_write_reg - No-op function, return 0 * @hw: pointer to the HW structure **/ -s32 e1000_null_write_reg(struct e1000_hw *hw, u32 offset, u16 data) +s32 e1000_null_write_reg(struct e1000_hw E1000_UNUSEDARG *hw, + u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG data) { DEBUGFUNC("e1000_null_write_reg"); return E1000_SUCCESS; @@ -158,8 +162,10 @@ s32 e1000_null_write_reg(struct e1000_hw *hw, u32 offset, u16 data) * @data: data value read * **/ -s32 e1000_read_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset, - u8 dev_addr, u8 *data) +s32 e1000_read_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw, + u8 E1000_UNUSEDARG byte_offset, + u8 E1000_UNUSEDARG dev_addr, + u8 E1000_UNUSEDARG *data) { DEBUGFUNC("e1000_read_i2c_byte_null"); return E1000_SUCCESS; @@ -173,10 +179,10 @@ s32 e1000_read_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset, * @data: data value to write * **/ -s32 e1000_write_i2c_byte_null(struct e1000_hw *hw, - u8 byte_offset, - u8 dev_addr, - u8 data) +s32 e1000_write_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw, + u8 E1000_UNUSEDARG byte_offset, + u8 E1000_UNUSEDARG dev_addr, + u8 E1000_UNUSEDARG data) { DEBUGFUNC("e1000_write_i2c_byte_null"); return E1000_SUCCESS; @@ -302,7 +308,7 @@ s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) * the lower time out */ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { - usec_delay(50); + usec_delay_irq(50); mdic = E1000_READ_REG(hw, E1000_MDIC); if (mdic & E1000_MDIC_READY) break; @@ -327,7 +333,7 @@ s32 
e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) * reading duplicate data in the next MDIC transaction. */ if (hw->mac.type == e1000_pch2lan) - usec_delay(100); + usec_delay_irq(100); return E1000_SUCCESS; } @@ -368,7 +374,7 @@ s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) * the lower time out */ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { - usec_delay(50); + usec_delay_irq(50); mdic = E1000_READ_REG(hw, E1000_MDIC); if (mdic & E1000_MDIC_READY) break; @@ -392,7 +398,7 @@ s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) * reading duplicate data in the next MDIC transaction. */ if (hw->mac.type == e1000_pch2lan) - usec_delay(100); + usec_delay_irq(100); return E1000_SUCCESS; } @@ -1054,16 +1060,12 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) } } - /* Enable CRS on Tx. This must be set for half-duplex operation. - * Not required on some PHYs. - */ + /* Enable CRS on Tx. This must be set for half-duplex operation. */ ret_val = hw->phy.ops.read_reg(hw, I82577_CFG_REG, &phy_data); if (ret_val) return ret_val; - if ((hw->phy.type != e1000_phy_82579) && - (hw->phy.type != e1000_phy_i217)) - phy_data |= I82577_CFG_ASSERT_CRS_ON_TX; + phy_data |= I82577_CFG_ASSERT_CRS_ON_TX; /* Enable downshift */ phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; @@ -1249,12 +1251,6 @@ s32 e1000_copper_link_setup_m88(struct e1000_hw *hw) return ret_val; } - if (phy->type == e1000_phy_i210) { - ret_val = e1000_set_master_slave_mode(hw); - if (ret_val) - return ret_val; - } - return E1000_SUCCESS; } @@ -1318,6 +1314,20 @@ s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw) phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; /* Enable downshift and setting it to X6 */ + if (phy->id == M88E1543_E_PHY_ID) { + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_ENABLE; + ret_val = + phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + } + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK; phy_data |= I347AT4_PSCR_DOWNSHIFT_6X; phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE; @@ -1333,6 +1343,10 @@ s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw) return ret_val; } + ret_val = e1000_set_master_slave_mode(hw); + if (ret_val) + return ret_val; + return E1000_SUCCESS; } @@ -1847,6 +1861,8 @@ s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw) case I347AT4_E_PHY_ID: case M88E1340M_E_PHY_ID: case M88E1112_E_PHY_ID: + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: case I210_I_PHY_ID: reset_dsp = FALSE; break; @@ -1889,6 +1905,9 @@ s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw) return E1000_SUCCESS; if (hw->phy.id == I210_I_PHY_ID) return E1000_SUCCESS; + if ((hw->phy.id == M88E1543_E_PHY_ID) || + (hw->phy.id == M88E1512_E_PHY_ID)) + return E1000_SUCCESS; ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); if (ret_val) return ret_val; @@ -2194,9 +2213,9 @@ s32 e1000_check_polarity_m88(struct e1000_hw *hw) ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data); if (!ret_val) - phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) - ? e1000_rev_polarity_reversed - : e1000_rev_polarity_normal; + phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY) + ? 
e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); return ret_val; } @@ -2240,9 +2259,9 @@ s32 e1000_check_polarity_igp(struct e1000_hw *hw) ret_val = phy->ops.read_reg(hw, offset, &data); if (!ret_val) - phy->cable_polarity = (data & mask) - ? e1000_rev_polarity_reversed - : e1000_rev_polarity_normal; + phy->cable_polarity = ((data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); return ret_val; } @@ -2274,9 +2293,9 @@ s32 e1000_check_polarity_ife(struct e1000_hw *hw) ret_val = phy->ops.read_reg(hw, offset, &phy_data); if (!ret_val) - phy->cable_polarity = (phy_data & mask) + phy->cable_polarity = ((phy_data & mask) ? e1000_rev_polarity_reversed - : e1000_rev_polarity_normal; + : e1000_rev_polarity_normal); return ret_val; } @@ -2343,19 +2362,23 @@ s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, * it across the board. */ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); - if (ret_val) + if (ret_val) { /* If the first read fails, another entity may have * ownership of the resources, wait and try again to * see if they have relinquished the resources yet. */ - usec_delay(usec_interval); + if (usec_interval >= 1000) + msec_delay(usec_interval/1000); + else + usec_delay(usec_interval); + } ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); if (ret_val) break; if (phy_status & MII_SR_LINK_STATUS) break; if (usec_interval >= 1000) - msec_delay_irq(usec_interval/1000); + msec_delay(usec_interval/1000); else usec_delay(usec_interval); } @@ -2392,8 +2415,8 @@ s32 e1000_get_cable_length_m88(struct e1000_hw *hw) if (ret_val) return ret_val; - index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> - M88E1000_PSSR_CABLE_LENGTH_SHIFT; + index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT); if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) return -E1000_ERR_PHY; @@ -2437,6 +2460,8 @@ s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw) phy->max_cable_length = phy_data / (is_cm ? 100 : 1); phy->cable_length = phy_data / (is_cm ? 100 : 1); break; + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: case M88E1340M_E_PHY_ID: case I347AT4_E_PHY_ID: /* Remember the original page select and set it to 7 */ @@ -2554,8 +2579,8 @@ s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw) * that can be put into the lookup table to obtain the * approximate cable length. */ - cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & - IGP02E1000_AGC_LENGTH_MASK; + cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & + IGP02E1000_AGC_LENGTH_MASK); /* Array index bound check. */ if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || @@ -2578,8 +2603,8 @@ s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw) agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); /* Calculate cable length with the error range of +/- 10 meters. */ - phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? - (agc_value - IGP02E1000_AGC_RANGE) : 0; + phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ? + (agc_value - IGP02E1000_AGC_RANGE) : 0); phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; @@ -2763,9 +2788,9 @@ s32 e1000_get_phy_info_ife(struct e1000_hw *hw) return ret_val; } else { /* Polarity is forced */ - phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY) - ? e1000_rev_polarity_reversed - : e1000_rev_polarity_normal; + phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY) + ? 
e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); } ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data); @@ -2863,7 +2888,7 @@ s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw) * Generic function to wait 10 milli-seconds for configuration to complete * and return success. **/ -s32 e1000_get_cfg_done_generic(struct e1000_hw *hw) +s32 e1000_get_cfg_done_generic(struct e1000_hw E1000_UNUSEDARG *hw) { DEBUGFUNC("e1000_get_cfg_done_generic"); @@ -2970,6 +2995,8 @@ enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id) case M88E1000_E_PHY_ID: case M88E1111_I_PHY_ID: case M88E1011_I_PHY_ID: + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: case I347AT4_E_PHY_ID: case M88E1112_E_PHY_ID: case M88E1340M_E_PHY_ID: @@ -3402,11 +3429,12 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data, bool read, bool page_set) { s32 ret_val; - u16 reg = BM_PHY_REG_NUM(offset); - u16 page = BM_PHY_REG_PAGE(offset); + u16 reg, page; u16 phy_reg = 0; DEBUGFUNC("e1000_access_phy_wakeup_reg_bm"); + reg = BM_PHY_REG_NUM(offset); + page = BM_PHY_REG_PAGE(offset); /* Gig must be disabled for MDIO accesses to Host Wakeup reg page */ if ((hw->mac.type == e1000_pchlan) && @@ -3464,16 +3492,10 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, void e1000_power_up_phy_copper(struct e1000_hw *hw) { u16 mii_reg = 0; - u16 power_reg = 0; /* The PHY will retain its settings across a power down/up cycle */ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); mii_reg &= ~MII_CR_POWER_DOWN; - if (hw->phy.type == e1000_phy_i210) { - hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg); - power_reg &= ~GS40G_CS_POWER_DOWN; - hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg); - } hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); } @@ -3488,17 +3510,10 @@ void e1000_power_up_phy_copper(struct e1000_hw *hw) void e1000_power_down_phy_copper(struct e1000_hw *hw) { u16 mii_reg = 0; - u16 power_reg = 0; /* The PHY will retain its settings across a power down/up cycle */ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); mii_reg |= MII_CR_POWER_DOWN; - /* i210 Phy requires an additional bit for power up/down */ - if (hw->phy.type == e1000_phy_i210) { - hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg); - power_reg |= GS40G_CS_POWER_DOWN; - hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg); - } hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); msec_delay(1); } @@ -3776,8 +3791,8 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, DEBUGFUNC("e1000_access_phy_debug_regs_hv"); /* This takes care of the difference with desktop vs mobile phy */ - addr_reg = (hw->phy.type == e1000_phy_82578) ? - I82578_ADDR_REG : I82577_ADDR_REG; + addr_reg = ((hw->phy.type == e1000_phy_82578) ? + I82578_ADDR_REG : I82577_ADDR_REG); data_reg = addr_reg + 1; /* All operations in this function are phy address 2 */ @@ -3833,8 +3848,8 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw) if (ret_val) return ret_val; - data &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | - BM_CS_STATUS_SPEED_MASK; + data &= (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_MASK); if (data != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | BM_CS_STATUS_SPEED_1000)) @@ -3872,9 +3887,9 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw) ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); if (!ret_val) - phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY) - ? 
e1000_rev_polarity_reversed - : e1000_rev_polarity_normal; + phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); return ret_val; } @@ -4009,8 +4024,8 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw) if (ret_val) return ret_val; - length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >> - I82577_DSTATUS_CABLE_LENGTH_SHIFT; + length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >> + I82577_DSTATUS_CABLE_LENGTH_SHIFT); if (length == E1000_CABLE_LENGTH_UNDEFINED) return -E1000_ERR_PHY; @@ -4083,3 +4098,157 @@ release: return ret_val; } +/** + * e1000_read_phy_reg_mphy - Read mPHY control register + * @hw: pointer to the HW structure + * @address: address to be read + * @data: pointer to the read data + * + * Reads the mPHY control register in the PHY at offset and stores the + * information read to data. + **/ +s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data) +{ + u32 mphy_ctrl = 0; + bool locked = FALSE; + bool ready; + + DEBUGFUNC("e1000_read_phy_reg_mphy"); + + /* Check if mPHY is ready to read/write operations */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + + /* Check if mPHY access is disabled and enable it if so */ + mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL); + if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) { + locked = TRUE; + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + mphy_ctrl |= E1000_MPHY_ENA_ACCESS; + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl); + } + + /* Set the address that we want to read */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + + /* We mask address, because we want to use only current lane */ + mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK & + ~E1000_MPHY_ADDRESS_FNC_OVERRIDE) | + (address & E1000_MPHY_ADDRESS_MASK); + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl); + + /* Read data from the address */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + *data = E1000_READ_REG(hw, E1000_MPHY_DATA); + + /* Disable access to mPHY if it was originally disabled */ + if (locked) + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, + E1000_MPHY_DIS_ACCESS); + + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg_mphy - Write mPHY control register + * @hw: pointer to the HW structure + * @address: address to write to + * @data: data to write to register at offset + * @line_override: used when we want to use different line than default one + * + * Writes data to mPHY control register. 
+ **/ +s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data, + bool line_override) +{ + u32 mphy_ctrl = 0; + bool locked = FALSE; + bool ready; + + DEBUGFUNC("e1000_write_phy_reg_mphy"); + + /* Check if mPHY is ready to read/write operations */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + + /* Check if mPHY access is disabled and enable it if so */ + mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL); + if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) { + locked = TRUE; + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + mphy_ctrl |= E1000_MPHY_ENA_ACCESS; + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl); + } + + /* Set the address that we want to read */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + + /* We mask address, because we want to use only current lane */ + if (line_override) + mphy_ctrl |= E1000_MPHY_ADDRESS_FNC_OVERRIDE; + else + mphy_ctrl &= ~E1000_MPHY_ADDRESS_FNC_OVERRIDE; + mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK) | + (address & E1000_MPHY_ADDRESS_MASK); + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl); + + /* Read data from the address */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + E1000_WRITE_REG(hw, E1000_MPHY_DATA, data); + + /* Disable access to mPHY if it was originally disabled */ + if (locked) + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, + E1000_MPHY_DIS_ACCESS); + + return E1000_SUCCESS; +} + +/** + * e1000_is_mphy_ready - Check if mPHY control register is not busy + * @hw: pointer to the HW structure + * + * Returns mPHY control register status. + **/ +bool e1000_is_mphy_ready(struct e1000_hw *hw) +{ + u16 retry_count = 0; + u32 mphy_ctrl = 0; + bool ready = FALSE; + + while (retry_count < 2) { + mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL); + if (mphy_ctrl & E1000_MPHY_BUSY) { + usec_delay(20); + retry_count++; + continue; + } + ready = TRUE; + break; + } + + if (!ready) + DEBUGOUT("ERROR READING mPHY control register, phy is busy.\n"); + + return ready; +} diff --git a/usr/src/uts/common/io/e1000api/e1000_phy.h b/usr/src/uts/common/io/e1000api/e1000_phy.h index 13f03fe63f..0e5b2e6ac6 100644 --- a/usr/src/uts/common/io/e1000api/e1000_phy.h +++ b/usr/src/uts/common/io/e1000api/e1000_phy.h @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2014, Intel Corporation All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -35,10 +35,6 @@ #ifndef _E1000_PHY_H_ #define _E1000_PHY_H_ -#ifdef __cplusplus -extern "C" { -#endif - void e1000_init_phy_ops_generic(struct e1000_hw *hw); s32 e1000_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data); void e1000_null_phy_generic(struct e1000_hw *hw); @@ -120,6 +116,10 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw); s32 e1000_get_cable_length_82577(struct e1000_hw *hw); s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data); s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data); +s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data, + bool line_override); +bool e1000_is_mphy_ready(struct e1000_hw *hw); #define E1000_MAX_PHY_ADDR 8 @@ -144,7 +144,6 @@ s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data); #define GS40G_MAC_LB 0x4140 #define GS40G_MAC_SPEED_1G 0X0006 #define GS40G_COPPER_SPEC 0x0010 -#define GS40G_CS_POWER_DOWN 0x0002 /* BM/HV Specific Registers */ #define BM_PORT_CTRL_PAGE 769 @@ -174,7 +173,7 @@ s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data); #define I82577_ADDR_REG 16 #define I82577_CFG_REG 22 #define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) -#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ +#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */ #define I82577_CTRL_REG 23 /* 82577 specific PHY registers */ @@ -205,6 +204,12 @@ s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data); #define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */ #define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */ +#define E1000_MPHY_DIS_ACCESS 0x80000000 /* disable_access bit */ +#define E1000_MPHY_ENA_ACCESS 0x40000000 /* enable_access bit */ +#define E1000_MPHY_BUSY 0x00010000 /* busy bit */ +#define E1000_MPHY_ADDRESS_FNC_OVERRIDE 0x20000000 /* fnc_override bit */ +#define E1000_MPHY_ADDRESS_MASK 0x0000FFFF /* address mask */ + /* BM PHY Copper Specific Control 1 */ #define BM_CS_CTRL1 16 @@ -220,6 +225,7 @@ s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data); #define HV_M_STATUS_AUTONEG_COMPLETE 0x1000 #define HV_M_STATUS_SPEED_MASK 0x0300 #define HV_M_STATUS_SPEED_1000 0x0200 +#define HV_M_STATUS_SPEED_100 0x0100 #define HV_M_STATUS_LINK_UP 0x0040 #define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 @@ -251,7 +257,7 @@ s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data); #define IGP02E1000_PHY_AGC_C 0x14B1 #define IGP02E1000_PHY_AGC_D 0x18B1 -#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */ +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course=15:13, Fine=12:9 */ #define IGP02E1000_AGC_LENGTH_MASK 0x7F #define IGP02E1000_AGC_RANGE 15 @@ -271,8 +277,8 @@ s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data); #define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */ #define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 -#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */ -#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */ +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Ctrl */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Ctrl */ #define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */ /* IFE PHY Extended Status Control */ @@ -316,8 +322,4 @@ struct sfp_e1000_flags { #define 
E1000_SFF_VENDOR_OUI_AVAGO 0x00176A00 #define E1000_SFF_VENDOR_OUI_INTEL 0x001B2100 -#ifdef __cplusplus -} #endif - -#endif /* _E1000_PHY_H_ */ diff --git a/usr/src/uts/common/io/e1000api/e1000_regs.h b/usr/src/uts/common/io/e1000api/e1000_regs.h index ea552bb260..5c2e3f788c 100644 --- a/usr/src/uts/common/io/e1000api/e1000_regs.h +++ b/usr/src/uts/common/io/e1000api/e1000_regs.h @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2014, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -35,10 +35,6 @@ #ifndef _E1000_REGS_H_ #define _E1000_REGS_H_ -#ifdef __cplusplus -extern "C" { -#endif - #define E1000_CTRL 0x00000 /* Device Control - RW */ #define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ #define E1000_STATUS 0x00008 /* Device Status - RO */ @@ -54,15 +50,16 @@ extern "C" { #define E1000_BARCTRL 0x5BBC /* BAR ctrl reg */ #define E1000_BARCTRL_FLSIZE 0x0700 /* BAR ctrl Flsize */ #define E1000_BARCTRL_CSRSIZE 0x2000 /* BAR ctrl CSR size */ -#define E1000_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */ -#define E1000_MPHY_DATA 0x0E10 /* GBE MPHY Data */ -#define E1000_MPHY_STAT 0x0E0C /* GBE MPHY Statistics */ -#define E1000_PPHY_CTRL 0x5b48 /* PCIe PHY Control */ +#define E1000_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */ +#define E1000_MPHY_DATA 0x0E10 /* GBE MPHY Data */ +#define E1000_MPHY_STAT 0x0E0C /* GBE MPHY Statistics */ +#define E1000_PPHY_CTRL 0x5b48 /* PCIe PHY Control */ #define E1000_I350_BARCTRL 0x5BFC /* BAR ctrl reg */ #define E1000_I350_DTXMXPKTSZ 0x355C /* Maximum sent packet size reg*/ #define E1000_SCTL 0x00024 /* SerDes Control - RW */ #define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ #define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FEXT 0x0002C /* Future Extended - RW */ #define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */ #define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */ #define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ @@ -102,7 +99,7 @@ extern "C" { #define E1000_TBT 0x00448 /* Tx Burst Timer - RW */ #define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ #define E1000_LEDCTL 0x00E00 /* LED Control - RW */ -#define E1000_LEDMUX 0x08130 /* LED MUX Control */ +#define E1000_LEDMUX 0x08130 /* LED MUX Control */ #define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ #define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ #define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ @@ -112,6 +109,7 @@ extern "C" { #define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */ #define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ #define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ +#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */ #define E1000_FLASHT 0x01028 /* FLASH Timer Register */ #define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ #define E1000_FLSWCTL 0x01030 /* FLASH control register */ @@ -161,8 +159,8 @@ extern "C" { #define E1000_PBRWAC 0x024E8 /* Rx packet buffer wrap around counter - RO */ #define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */ #define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */ -#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */ -#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */ +#define E1000_EMIADD 0x10 /* Extended 
Memory Indirect Address */ +#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */ #define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */ #define E1000_I210_FLMNGCTL 0x12038 #define E1000_I210_FLMNGDATA 0x1203C @@ -219,8 +217,8 @@ extern "C" { /* Queues packet buffer size masks where _n can be 0-3 and _s 0-63 [kB] */ #define E1000_I210_TXPBS_SIZE(_n, _s) ((_s) << (6 * _n)) -#define E1000_MMDAC 13 /* MMD Access Control */ -#define E1000_MMDAAD 14 /* MMD Access Address/Data */ +#define E1000_MMDAC 13 /* MMD Access Control */ +#define E1000_MMDAAD 14 /* MMD Access Address/Data */ /* Convenience macros * @@ -498,8 +496,6 @@ extern "C" { #define E1000_PBACL 0x05B68 /* MSIx PBA Clear - Read/Write 1's to clear */ #define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ #define E1000_HOST_IF 0x08800 /* Host Interface */ -#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ -#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ #define E1000_HIBBA 0x8F40 /* Host Interface Buffer Base Address */ /* Flexible Host Filter Table */ #define E1000_FHFT(_n) (0x09000 + ((_n) * 0x100)) @@ -687,8 +683,4 @@ extern "C" { #define E1000_DOBFFCTL 0x3F24 /* DMA OBFF Control Register */ -#ifdef __cplusplus -} #endif - -#endif /* _E1000_REGS_H_ */ diff --git a/usr/src/uts/common/io/e1000api/e1000_vf.c b/usr/src/uts/common/io/e1000api/e1000_vf.c index d1286ad2a5..2cabac9fa4 100644 --- a/usr/src/uts/common/io/e1000api/e1000_vf.c +++ b/usr/src/uts/common/io/e1000api/e1000_vf.c @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2011, Intel Corporation + Copyright (c) 2001-2014, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -49,7 +49,7 @@ static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, static s32 e1000_init_hw_vf(struct e1000_hw *hw); static s32 e1000_reset_hw_vf(struct e1000_hw *hw); static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *, u32); -static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32); +static int e1000_rar_set_vf(struct e1000_hw *, u8 *, u32); static s32 e1000_read_mac_addr_vf(struct e1000_hw *); /** @@ -159,7 +159,7 @@ void e1000_init_function_pointers_vf(struct e1000_hw *hw) * In addition, the MAC registers to access PHY/NVM don't exist so we don't * even want any SW to attempt to use them. **/ -static s32 e1000_acquire_vf(struct e1000_hw *hw) +static s32 e1000_acquire_vf(struct e1000_hw E1000_UNUSEDARG *hw) { return -E1000_ERR_PHY; } @@ -172,7 +172,7 @@ static s32 e1000_acquire_vf(struct e1000_hw *hw) * In addition, the MAC registers to access PHY/NVM don't exist so we don't * even want any SW to attempt to use them. **/ -static void e1000_release_vf(struct e1000_hw *hw) +static void e1000_release_vf(struct e1000_hw E1000_UNUSEDARG *hw) { return; } @@ -183,7 +183,7 @@ static void e1000_release_vf(struct e1000_hw *hw) * * Virtual functions cannot change link. 
**/ -static s32 e1000_setup_link_vf(struct e1000_hw *hw) +static s32 e1000_setup_link_vf(struct e1000_hw E1000_UNUSEDARG *hw) { DEBUGFUNC("e1000_setup_link_vf"); @@ -320,7 +320,8 @@ static s32 e1000_init_hw_vf(struct e1000_hw *hw) * @addr: pointer to the receive address * @index receive address array register **/ -static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index) +static int e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr, + u32 E1000_UNUSEDARG index) { struct e1000_mbx_info *mbx = &hw->mbx; u32 msgbuf[3]; @@ -341,6 +342,8 @@ static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index) if (!ret_val && (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK))) e1000_read_mac_addr_vf(hw); + + return E1000_SUCCESS; } /** diff --git a/usr/src/uts/common/io/e1000api/e1000_vf.h b/usr/src/uts/common/io/e1000api/e1000_vf.h index 6208f542d5..2a780741c2 100644 --- a/usr/src/uts/common/io/e1000api/e1000_vf.h +++ b/usr/src/uts/common/io/e1000api/e1000_vf.h @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2010, Intel Corporation + Copyright (c) 2001-2014, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -35,58 +35,56 @@ #ifndef _E1000_VF_H_ #define _E1000_VF_H_ -#ifdef __cplusplus -extern "C" { -#endif - #include "e1000_osdep.h" #include "e1000_regs.h" #include "e1000_defines.h" struct e1000_hw; -#define E1000_DEV_ID_82576_VF 0x10CA -#define E1000_DEV_ID_I350_VF 0x1520 +#define E1000_DEV_ID_82576_VF 0x10CA +#define E1000_DEV_ID_I350_VF 0x1520 -#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ +#define E1000_VF_INIT_TIMEOUT 200 /* Num of retries to clear RSTI */ /* Additional Descriptor Control definitions */ -#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ -#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */ +#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */ /* SRRCTL bit definitions */ -#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ -#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 -#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ -#define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000 -#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 -#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 -#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 -#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000 +#define E1000_SRRCTL(_n) ((_n) < 4 ? 
(0x0280C + ((_n) * 0x100)) : \ + (0x0C00C + ((_n) * 0x40))) +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000 #define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 -#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 -#define E1000_SRRCTL_DROP_EN 0x80000000 +#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 +#define E1000_SRRCTL_DROP_EN 0x80000000 -#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F -#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 +#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 /* Interrupt Defines */ -#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ -#define E1000_EITR(_n) (0x01680 + ((_n) << 2)) -#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ -#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ -#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ -#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ -#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ -#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ -#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ -#define E1000_IVAR_VALID 0x80 +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define E1000_EITR(_n) (0x01680 + ((_n) << 2)) +#define E1000_EICS 0x01520 /* Ext. Intr Cause Set -W0 */ +#define E1000_EIMS 0x01524 /* Ext. Intr Mask Set/Read -RW */ +#define E1000_EIMC 0x01528 /* Ext. Intr Mask Clear -WO */ +#define E1000_EIAC 0x0152C /* Ext. Intr Auto Clear -RW */ +#define E1000_EIAM 0x01530 /* Ext. 
Intr Ack Auto Clear Mask -RW */ +#define E1000_IVAR0 0x01700 /* Intr Vector Alloc (array) -RW */ +#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes -RW */ +#define E1000_IVAR_VALID 0x80 /* Receive Descriptor - Advanced */ union e1000_adv_rx_desc { struct { - u64 pkt_addr; /* Packet buffer address */ - u64 hdr_addr; /* Header buffer address */ + u64 pkt_addr; /* Packet buffer address */ + u64 hdr_addr; /* Header buffer address */ } read; struct { struct { @@ -100,23 +98,23 @@ union e1000_adv_rx_desc { } hs_rss; } lo_dword; union { - u32 rss; /* RSS Hash */ + u32 rss; /* RSS Hash */ struct { - u16 ip_id; /* IP id */ - u16 csum; /* Packet Checksum */ + u16 ip_id; /* IP id */ + u16 csum; /* Packet Checksum */ } csum_ip; } hi_dword; } lower; struct { - u32 status_error; /* ext status/error */ - u16 length; /* Packet length */ - u16 vlan; /* VLAN tag */ + u32 status_error; /* ext status/error */ + u16 length; /* Packet length */ + u16 vlan; /* VLAN tag */ } upper; } wb; /* writeback */ }; -#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 -#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 +#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 /* Transmit Descriptor - Advanced */ union e1000_adv_tx_desc { @@ -133,15 +131,15 @@ union e1000_adv_tx_desc { }; /* Adv Transmit Descriptor Config Masks */ -#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ -#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ -#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ -#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ -#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ -#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ -#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ -#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ -#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ +#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ /* Context descriptors */ struct e1000_adv_tx_context_desc { @@ -151,11 +149,11 @@ struct e1000_adv_tx_context_desc { u32 mss_l4len_idx; }; -#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ -#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ -#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ -#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ -#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ enum e1000_mac_type { e1000_undefined = 0, @@ -210,7 +208,7 @@ struct e1000_mac_operations { s32 (*init_hw)(struct e1000_hw *); s32 (*setup_link)(struct 
e1000_hw *); void (*write_vfta)(struct e1000_hw *, u32, u32); - void (*rar_set)(struct e1000_hw *, u8*, u32); + int (*rar_set)(struct e1000_hw *, u8*, u32); s32 (*read_mac_addr)(struct e1000_hw *); }; @@ -295,9 +293,4 @@ s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); void e1000_vfta_set_vf(struct e1000_hw *, u16, bool); void e1000_rlpml_set_vf(struct e1000_hw *, u16); s32 e1000_promisc_set_vf(struct e1000_hw *, enum e1000_promisc_type); - -#ifdef __cplusplus -} -#endif - #endif /* _E1000_VF_H_ */ diff --git a/usr/src/uts/common/io/e1000g/e1000_osdep.h b/usr/src/uts/common/io/e1000g/e1000_osdep.h index 9853673b24..303d24848b 100644 --- a/usr/src/uts/common/io/e1000g/e1000_osdep.h +++ b/usr/src/uts/common/io/e1000g/e1000_osdep.h @@ -51,6 +51,7 @@ extern "C" { #include "e1000g_debug.h" #define usec_delay(x) drv_usecwait(x) +#define usec_delay_irq usec_delay #define msec_delay(x) drv_usecwait(x * 1000) #define msec_delay_irq msec_delay diff --git a/usr/src/uts/common/io/e1000g/e1000g_main.c b/usr/src/uts/common/io/e1000g/e1000g_main.c index f769457ed4..514b4d8e7d 100644 --- a/usr/src/uts/common/io/e1000g/e1000g_main.c +++ b/usr/src/uts/common/io/e1000g/e1000g_main.c @@ -25,7 +25,7 @@ /* * Copyright 2012 DEY Storage Systems, Inc. All rights reserved. * Copyright 2013 Nexenta Systems, Inc. All rights reserved. - * Copyright (c) 2014, Joyent, Inc. All rights reserved. + * Copyright (c) 2015, Joyent, Inc. */ /* @@ -494,6 +494,11 @@ e1000g_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) } /* + * Disable ULP support + */ + (void) e1000_disable_ulp_lpt_lp(hw, TRUE); + + /* * Initialize interrupts */ if (e1000g_add_intrs(Adapter) != DDI_SUCCESS) { @@ -2494,12 +2499,12 @@ e1000g_init_unicst(struct e1000g *Adapter) /* Workaround for an erratum of 82571 chipst */ if ((hw->mac.type == e1000_82571) && (e1000_get_laa_state_82571(hw) == B_TRUE)) - e1000_rar_set(hw, hw->mac.addr, LAST_RAR_ENTRY); + (void) e1000_rar_set(hw, hw->mac.addr, LAST_RAR_ENTRY); /* Re-configure the RAR registers */ for (slot = 0; slot < Adapter->unicst_total; slot++) if (Adapter->unicst_addr[slot].mac.set == 1) - e1000_rar_set(hw, + (void) e1000_rar_set(hw, Adapter->unicst_addr[slot].mac.addr, slot); } @@ -2541,7 +2546,7 @@ e1000g_unicst_set(struct e1000g *Adapter, const uint8_t *mac_addr, } else { bcopy(mac_addr, Adapter->unicst_addr[slot].mac.addr, ETHERADDRL); - e1000_rar_set(hw, (uint8_t *)mac_addr, slot); + (void) e1000_rar_set(hw, (uint8_t *)mac_addr, slot); Adapter->unicst_addr[slot].mac.set = 1; } @@ -2557,7 +2562,7 @@ e1000g_unicst_set(struct e1000g *Adapter, const uint8_t *mac_addr, (slot << 1) + 1, 0); E1000_WRITE_FLUSH(hw); } else { - e1000_rar_set(hw, (uint8_t *)mac_addr, + (void) e1000_rar_set(hw, (uint8_t *)mac_addr, LAST_RAR_ENTRY); } } @@ -4417,7 +4422,7 @@ e1000g_local_timer(void *ws) (ether_addr.mac.addr[2] != hw->mac.addr[3]) || (ether_addr.mac.addr[1] != hw->mac.addr[4]) || (ether_addr.mac.addr[0] != hw->mac.addr[5])) { - e1000_rar_set(hw, hw->mac.addr, 0); + (void) e1000_rar_set(hw, hw->mac.addr, 0); } } diff --git a/usr/src/uts/common/io/igb/e1000_osdep.h b/usr/src/uts/common/io/igb/e1000_osdep.h index afe04ffb76..f9ac1594bf 100644 --- a/usr/src/uts/common/io/igb/e1000_osdep.h +++ b/usr/src/uts/common/io/igb/e1000_osdep.h @@ -54,6 +54,7 @@ extern "C" { #include "igb_debug.h" #define usec_delay(x) drv_usecwait(x) +#define usec_delay_irq usec_delay #define msec_delay(x) drv_usecwait(x * 1000) #define msec_delay_irq msec_delay diff --git a/usr/src/uts/common/io/igb/igb_main.c 
b/usr/src/uts/common/io/igb/igb_main.c index 74c27a5802..3a215116fb 100644 --- a/usr/src/uts/common/io/igb/igb_main.c +++ b/usr/src/uts/common/io/igb/igb_main.c @@ -2615,7 +2615,8 @@ igb_init_unicst(igb_t *igb) } else { /* Re-configure the RAR registers */ for (slot = 0; slot < igb->unicst_total; slot++) { - e1000_rar_set_vmdq(hw, igb->unicst_addr[slot].mac.addr, + (void) e1000_rar_set_vmdq(hw, + igb->unicst_addr[slot].mac.addr, slot, igb->vmdq_mode, igb->unicst_addr[slot].mac.group_index); } @@ -2660,7 +2661,7 @@ igb_unicst_set(igb_t *igb, const uint8_t *mac_addr, /* * Set the unicast address to the RAR register */ - e1000_rar_set(hw, (uint8_t *)mac_addr, slot); + (void) e1000_rar_set(hw, (uint8_t *)mac_addr, slot); if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) { ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED); diff --git a/usr/src/uts/intel/bge/Makefile b/usr/src/uts/intel/bge/Makefile index 3d59fb4bf1..661d2b4fb3 100644 --- a/usr/src/uts/intel/bge/Makefile +++ b/usr/src/uts/intel/bge/Makefile @@ -38,7 +38,7 @@ UTSBASE = ../.. # MODULE = bge OBJECTS = $(BGE_OBJS:%=$(OBJS_DIR)/%) -LINTS = $(BGE_OBJS:%.o=$(LINTS_DIR)/%.ln) +LINTS = $(LINTS_DIR)/bge_lint.ln ROOTMODULE = $(ROOT_DRV_DIR)/$(MODULE) CONF_SRCDIR = $(UTSBASE)/common/io/bge @@ -67,6 +67,8 @@ LINTTAGS += -erroff=E_BAD_PTR_CAST_ALIGN CERRWARN += -_gcc=-Wno-uninitialized CERRWARN += -_gcc=-Wno-switch CERRWARN += -_gcc=-Wno-parentheses +CERRWARN += -_gcc=-Wno-unused-variable +CERRWARN += -_gcc=-Wno-unused-function # # Driver depends on MAC diff --git a/usr/src/uts/sparc/bge/Makefile b/usr/src/uts/sparc/bge/Makefile index df29b5b014..765db1019d 100644 --- a/usr/src/uts/sparc/bge/Makefile +++ b/usr/src/uts/sparc/bge/Makefile @@ -39,7 +39,7 @@ UTSBASE = ../.. # MODULE = bge OBJECTS = $(BGE_OBJS:%=$(OBJS_DIR)/%) -LINTS = $(BGE_OBJS:%.o=$(LINTS_DIR)/%.ln) +LINTS = $(LINTS_DIR)/bge_lint.ln ROOTMODULE = $(ROOT_DRV_DIR)/$(MODULE) CONF_SRCDIR = $(UTSBASE)/common/io/bge |
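The new mPHY accessors added to e1000_phy.c (e1000_read_phy_reg_mphy(), e1000_write_phy_reg_mphy(), e1000_is_mphy_ready()) all gate register access on the busy bit in E1000_MPHY_ADDR_CTRL: the ready check polls at most twice and backs off 20 microseconds whenever E1000_MPHY_BUSY is set. Below is a minimal user-space sketch of that polling pattern only, not the driver code itself; the register read is stubbed, and the names outside the diff (read_mphy_addr_ctrl, mphy_ready) are invented for illustration.

/* Sketch of the bounded busy-poll used by e1000_is_mphy_ready(). */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#define MPHY_BUSY 0x00010000u   /* mirrors E1000_MPHY_BUSY from the diff */

/* Stand-in for E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL). */
static uint32_t read_mphy_addr_ctrl(void)
{
	static int calls;
	/* Pretend the controller is busy on the first poll only. */
	return ((calls++ == 0) ? MPHY_BUSY : 0);
}

static bool mphy_ready(void)
{
	for (int retry = 0; retry < 2; retry++) {
		if (read_mphy_addr_ctrl() & MPHY_BUSY) {
			usleep(20);	/* usec_delay(20) in the driver */
			continue;
		}
		return (true);
	}
	return (false);		/* driver logs "phy is busy" here */
}

int main(void)
{
	printf("mPHY ready: %s\n", mphy_ready() ? "yes" : "no");
	return (0);
}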
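Because the shared-code rar_set operations (e1000_rar_set_generic(), e1000_rar_set_vf(), e1000_null_rar_set()) now return int instead of void, the illumos callers in e1000g_main.c and igb_main.c cast the result to (void) wherever they intentionally ignore it, which keeps lint quiet. A stand-alone sketch of that caller pattern, with hypothetical names in place of the real e1000_rar_set()/e1000_hw plumbing:

#include <stdio.h>

/* Hypothetical stand-in for the shared-code rar_set op. */
static int rar_set(const unsigned char *addr, unsigned int slot)
{
	printf("program RAR[%u] for %02x:..:%02x\n", slot, addr[0], addr[5]);
	return (0);		/* E1000_SUCCESS in the driver */
}

int main(void)
{
	unsigned char mac[6] = { 0x00, 0x1b, 0x21, 0x00, 0x00, 0x01 };

	/* mirrors "(void) e1000_rar_set(hw, mac_addr, slot)" in e1000g/igb */
	(void) rar_set(mac, 0);
	return (0);
}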
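The MDIC polling loops in e1000_phy.c now call usec_delay_irq() instead of usec_delay(), so both osdep headers grow a one-line alias; on illumos both paths end up in drv_usecwait(). A small sketch of how that shim behaves, assuming a stubbed drv_usecwait() in place of the DDI busy-wait routine:

#include <stdio.h>

/* Stub for the kernel's drv_usecwait(9F). */
static void drv_usecwait(unsigned long usec)
{
	printf("busy-wait %lu us\n", usec);
}

#define usec_delay(x)	drv_usecwait(x)
#define usec_delay_irq	usec_delay	/* the alias added in e1000_osdep.h */

int main(void)
{
	usec_delay_irq(50);	/* as used while polling E1000_MDIC for READY */
	return (0);
}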
