diff -uNr net/drivers/net/cb_shim.c linux-2.4.20/drivers/net/cb_shim.c
--- net/drivers/net/cb_shim.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.4.20/drivers/net/cb_shim.c	2003-01-14 20:29:33.000000000 -0500
@@ -0,0 +1,296 @@
+/* cb_shim.c: Linux CardBus device support code. */
+/*
+	Written 1999-2002 by Donald Becker.
+
+	This software may be used and distributed according to the terms of
+	the GNU General Public License (GPL), incorporated herein by
+	reference.  This is not a documented interface.  Drivers incorporating
+	or interacting with these functions are derivative works and thus
+	are covered by the GPL.  They must include an explicit GPL notice.
+
+	This code provides a shim to allow newer drivers to interact with the
+	older Cardbus driver activation code.  The functions supported are
+	attach, suspend, power-off, resume and eject.
+
+	The author may be reached as becker@scyld.com, or
+	Donald Becker
+	Scyld Computing Corporation
+	410 Severn Ave., Suite 210
+	Annapolis MD 21403
+
+	Support and updates available at
+	http://www.scyld.com/network/drivers.html
+
+	Other contributors:  (none yet)
+*/
+
+static const char version1[] =
+"cb_shim.c:v1.03 7/12/2002  Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" http://www.scyld.com/linux/drivers.html\n";
+
+/* Module options. */
+static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+
+/* These might be awkward to locate. */
+#include <pcmcia/driver_ops.h>
+#include "pci-scan.h"
+#include "kern_compat.h"
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Hot-swap-PCI and Cardbus event dispatch");
+MODULE_LICENSE("GPL");
+MODULE_PARM(debug, "i");
+MODULE_PARM_DESC(debug, "Enable additional status messages (0-7)");
+
+/* Note: this is used in a slightly sleazy manner: it is passed to routines
+   that expect and return just dev_node_t.  However using the too-simple
+   dev_node_t complicates device management -- older drivers had to
+   look up dev_node_t.name in their private list. */
+
+struct registered_pci_device {
+	struct dev_node_t node;
+	int magic;
+	struct registered_pci_device *next;
+	struct drv_id_info *drv_info;
+	struct pci_dev *pci_loc;
+	void *dev_instance;
+} static *root_pci_devs = 0;
+
+struct drv_shim {
+	struct drv_id_info *did;
+	struct driver_operations drv_ops;
+	int magic;
+	struct drv_shim *next;
+} static *root_drv_id = 0;
+
+static void drv_power_op(struct dev_node_t *node, enum drv_pwr_action action)
+{
+	struct registered_pci_device **devp, **next, *rpin = (void *)node, *rp;
+	if (debug > 1)
+		printk(KERN_DEBUG "power operation(%s, %d).\n",
+			   rpin->drv_info->name, action);
+	/* With our wrapper structure we can almost do
+	   rpin->drv_info->pwr_event(rpin->dev_instance, action);
+	   But the detach operation requires us to remove the object from the
+	   list, so we check for uncontrolled "ghost" devices. */
+	for (devp = &root_pci_devs; *devp; devp = next) {
+		rp = *devp;
+		next = &rp->next;
+		if (rp == rpin) {
+			if (rp->drv_info->pwr_event)
+				rp->drv_info->pwr_event((*devp)->dev_instance, action);
+			else
+				printk(KERN_ERR "No power event handler for driver %s.\n",
+					   rpin->drv_info->name);
+			if (action == DRV_DETACH) {
+				*devp = *next;	/* Unlink first: 'next' points into 'rp'. */
+				kfree(rp);	/* Safe to free only after the unlink above. */
+				MOD_DEC_USE_COUNT;
+			}
+			return;
+		}
+	}
+	if (debug)
+		printk(KERN_WARNING "power operation(%s, %d) for a ghost device.\n",
+			   node->dev_name, action);
+}
+/* Thin wrappers binding each driver_operations entry point to drv_power_op(). */
+static void drv_suspend(struct dev_node_t *node)
+{
+	drv_power_op(node, DRV_SUSPEND);
+}
+static void drv_resume(struct dev_node_t *node)
+{
+	drv_power_op(node, DRV_RESUME);
+}
+static void drv_detach(struct dev_node_t *node)
+{
+	drv_power_op(node, DRV_DETACH);	/* Also unlinks and frees the wrapper. */
+}
+
+/* The CardBus interaction does not identify the driver the attach() is
+   for, thus we must search for the ID in all PCI device tables.
+   While ugly, we likely only have one driver loaded anyway.
+*/
+static dev_node_t *drv_attach(struct dev_locator_t *loc)
+{
+	struct drv_shim *dp;
+	struct drv_id_info *drv_id = NULL;
+	struct pci_id_info *pci_tbl = NULL;
+	u32 pci_id, subsys_id, pci_rev, pciaddr;
+	u8 irq;
+	int chip_idx = 0, pci_flags, bus, devfn;
+	long ioaddr;
+	void *newdev;
+
+	if (debug > 1)
+		printk(KERN_INFO "drv_attach()\n");
+	if (loc->bus != LOC_PCI) return NULL;	/* Only PCI locators are handled. */
+	bus = loc->b.pci.bus; devfn = loc->b.pci.devfn;
+	if (debug > 1)
+		printk(KERN_DEBUG "drv_attach(bus %d, function %d)\n", bus, devfn);
+
+	pcibios_read_config_dword(bus, devfn, PCI_VENDOR_ID, &pci_id);	/* Vendor+device in one dword. */
+	pcibios_read_config_dword(bus, devfn, PCI_SUBSYSTEM_ID, &subsys_id);
+	pcibios_read_config_dword(bus, devfn, PCI_REVISION_ID, &pci_rev);
+	pcibios_read_config_byte(bus, devfn, PCI_INTERRUPT_LINE, &irq);
+	for (dp = root_drv_id; dp; dp = dp->next) {	/* Search every registered driver's table. */
+		drv_id = dp->did;
+		pci_tbl = drv_id->pci_dev_tbl;
+		for (chip_idx = 0; pci_tbl[chip_idx].name; chip_idx++) {
+			struct pci_id_info *chip = &pci_tbl[chip_idx];
+			if ((pci_id & chip->id.pci_mask) == chip->id.pci
+				&& (subsys_id & chip->id.subsystem_mask) == chip->id.subsystem
+				&& (pci_rev & chip->id.revision_mask) == chip->id.revision)
+				break;
+		}
+		if (pci_tbl[chip_idx].name) 		/* Stopped on a named entry: match found. */
+			break;
+	}
+	if (dp == 0) {
+		printk(KERN_WARNING "No driver match for device %8.8x at %d/%d.\n",
+			   pci_id, bus, devfn);
+		return 0;
+	}
+	pci_flags = pci_tbl[chip_idx].pci_flags;
+	pcibios_read_config_dword(bus, devfn, ((pci_flags >> 2) & 0x1C) + 0x10,
+							  &pciaddr);	/* Read the BAR selected by pci_flags. */
+	if ((pciaddr & PCI_BASE_ADDRESS_SPACE_IO)) {
+		ioaddr = pciaddr & PCI_BASE_ADDRESS_IO_MASK;
+	} else
+		ioaddr = (long)ioremap(pciaddr & PCI_BASE_ADDRESS_MEM_MASK,
+							   pci_tbl[chip_idx].io_size);
+	if (ioaddr == 0 || irq == 0) {
+		printk(KERN_ERR "The %s at %d/%d was not assigned an %s.\n"
+			   KERN_ERR "  It will not be activated.\n",
+			   pci_tbl[chip_idx].name, bus, devfn,
+			   ioaddr == 0 ? "address" : "IRQ");
+		return NULL;
+	}
+	printk(KERN_INFO "Found a %s at %d/%d address 0x%x->0x%lx IRQ %d.\n",
+		   pci_tbl[chip_idx].name, bus, devfn, pciaddr, ioaddr, irq);
+	{
+		u16 pci_command;
+		pcibios_read_config_word(bus, devfn, PCI_COMMAND, &pci_command);
+		printk(KERN_INFO "%s at %d/%d command 0x%x.\n",
+		   pci_tbl[chip_idx].name, bus, devfn, pci_command);
+	}
+
+	newdev = drv_id->probe1(pci_find_slot(bus, devfn), 0,
+							ioaddr, irq, chip_idx, 0);
+	if (newdev) {
+		struct registered_pci_device *hsdev =
+			kmalloc(sizeof(struct registered_pci_device), GFP_KERNEL);	/* NOTE(review): result not NULL-checked before use -- verify. */
+		if (drv_id->pci_class == PCI_CLASS_NETWORK_ETHERNET<<8)
+			strcpy(hsdev->node.dev_name, ((struct net_device *)newdev)->name);
+		hsdev->node.major = hsdev->node.minor = 0;
+		hsdev->node.next = NULL;
+		hsdev->drv_info = drv_id;
+		hsdev->dev_instance = newdev;
+		hsdev->next = root_pci_devs;	/* Push onto the device list head. */
+		root_pci_devs = hsdev;
+		drv_id->pwr_event(newdev, DRV_ATTACH);
+		MOD_INC_USE_COUNT;
+		return &hsdev->node;
+	}
+	return NULL;
+}
+
+/* Add/remove a driver ID structure to our private list of known drivers. */
+int do_cb_register(struct drv_id_info *did)
+{
+	struct driver_operations *dop;
+	struct drv_shim *dshim = kmalloc(sizeof(*dshim), GFP_KERNEL);
+	if (dshim == 0)
+		return 0;	/* NOTE(review): 0 may be indistinguishable from success -- verify callers. */
+	if (debug > 1)
+		printk(KERN_INFO "Registering driver support for '%s'.\n",
+			   did->name);
+	MOD_INC_USE_COUNT;	/* Held until do_cb_unregister(). */
+	dshim->did = did;
+	dop = &dshim->drv_ops;
+	dop->name = (char *)did->name;
+	dop->attach = drv_attach;
+	dop->suspend = drv_suspend;
+	dop->resume = drv_resume;
+	dop->detach = drv_detach;
+	dshim->next = root_drv_id;	/* Push onto the driver list head. */
+	root_drv_id = dshim;
+	return register_driver(dop);	/* NOTE(review): dshim stays listed even if this fails -- verify. */
+}
+
+void do_cb_unregister(struct drv_id_info *did)
+{
+	struct drv_shim **dp;
+	for (dp = &root_drv_id; *dp; dp = &(*dp)->next)	/* Pointer-to-pointer walk allows in-place unlink. */
+		if ((*dp)->did == did) {
+			struct drv_shim *dshim = *dp;
+			unregister_driver(&dshim->drv_ops);
+			*dp = dshim->next;	/* Unlink before freeing. */
+			kfree(dshim);
+			MOD_DEC_USE_COUNT;	/* Balances do_cb_register(). */
+			return;
+		}
+}
+
+extern int (*register_hotswap_hook)(struct drv_id_info *did);
+extern void (*unregister_hotswap_hook)(struct drv_id_info *did);
+
+int (*old_cb_hook)(struct drv_id_info *did);
+void (*old_un_cb_hook)(struct drv_id_info *did);
+
+int init_module(void)
+{
+	if (debug)
+		printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+	old_cb_hook = register_hotswap_hook;	/* Save prior hooks to restore at unload. */
+	old_un_cb_hook = unregister_hotswap_hook;
+	register_hotswap_hook = do_cb_register;	/* Install our dispatch functions. */
+	unregister_hotswap_hook = do_cb_unregister;
+	return 0;
+}
+void cleanup_module(void)
+{
+	register_hotswap_hook = 	old_cb_hook;	/* Restore the saved hooks. */
+	unregister_hotswap_hook = old_un_cb_hook;
+	return;
+}
+
+
+/*
+ * Local variables:
+ *  compile-command: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c cb_shim.c -I/usr/include/ -I/usr/src/pcmcia/include/"
+ *  c-indent-level: 4
+ *  c-basic-offset: 4
+ *  tab-width: 4
+ * End:
+ */
+
/
diff -uNr net/drivers/net/intel-gige.c linux-2.4.20/drivers/net/intel-gige.c
--- net/drivers/net/intel-gige.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.4.20/drivers/net/intel-gige.c	2003-01-14 20:29:34.000000000 -0500
@@ -0,0 +1,1451 @@
+/* intel-gige.c: A Linux device driver for Intel Gigabit Ethernet adapters. */
+/*
+	Written 2000-2002 by Donald Becker.
+	Copyright Scyld Computing Corporation.
+
+	This software may be used and distributed according to the terms of
+	the GNU General Public License (GPL), incorporated herein by reference.
+	You should have received a copy of the GPL with this file.
+	Drivers based on or derived from this code fall under the GPL and must
+	retain the authorship, copyright and license notice.  This file is not
+	a complete program and may only be used when the entire operating
+	system is licensed under the GPL.
+
+	The author may be reached as becker@scyld.com, or C/O
+	Scyld Computing Corporation
+	410 Severn Ave., Suite 210
+	Annapolis MD 21403
+
+	Support information and updates available at
+	http://www.scyld.com/network/ethernet.html
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"intel-gige.c:v0.14 11/17/2002 Written by Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+"  http://www.scyld.com/network/ethernet.html\n";
+
+/* Automatically extracted configuration info:
+probe-func: igige_probe
+config-in: tristate 'Intel PCI Gigabit Ethernet support' CONFIG_IGIGE
+
+c-help-name: Intel PCI Gigabit Ethernet support
+c-help-symbol: CONFIG_IGIGE
+c-help: This driver is for the Intel PCI Gigabit Ethernet
+c-help: adapter series.
+c-help: More specific information and updates are available from 
+c-help: http://www.scyld.com/network/drivers.html
+*/
+
+/* The user-configurable values.
+   These may be modified when a driver module is loaded.*/
+
+/* Message enable level: 0..31 = no..all messages.  See NETIF_MSG docs. */
+static int debug = 2;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+   This chip has a 16 element perfect filter, and an unusual 4096 bit
+   hash filter based directly on address bits, not the Ethernet CRC.
+   It is costly to recalculate a large, frequently changing table.
+   However even a large table may be useful in some nearly-static environments.
+*/
+static int multicast_filter_limit = 15;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+   Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak = 0;
+
+/* Used to pass the media type, etc.
+   The media type is passed in 'options[]'.  The full_duplex[] table only
+   allows the duplex to be forced on, implicitly disabling autonegotiation.
+   Setting the entry to zero still allows a link to autonegotiate to full
+   duplex.
+*/
+#define MAX_UNITS 8		/* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* The delay before announcing a Rx or Tx has completed. */
+static int rx_intr_holdoff = 0;
+static int tx_intr_holdoff = 128;
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two to avoid divides.
+   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+   Making the Tx ring too large decreases the effectiveness of channel
+   bonding and packet priority.
+   There are no ill effects from too-large receive rings. */
+#if ! defined(final_version)		/* Stress the driver. */
+#define TX_RING_SIZE	8
+#define TX_QUEUE_LEN	5
+#define RX_RING_SIZE	4
+#else
+#define TX_RING_SIZE	16
+#define TX_QUEUE_LEN	10		/* Limit ring entries actually used.  */
+#define RX_RING_SIZE	32
+#endif
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT  (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+   Do not change this value without good reason.  This is not a limit,
+   but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ		1536
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning  You must compile this file with the correct options!
+#warning  See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/processor.h>		/* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+/* Condensed operations for readability. */
+#define virt_to_le32desc(addr)  cpu_to_le32(virt_to_bus(addr))
+#define le32desc_to_virt(addr)  bus_to_virt(le32_to_cpu(addr))
+
+#if (LINUX_VERSION_CODE >= 0x20100)  &&  defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Intel Gigabit Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(debug, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM_DESC(debug, "Driver message level (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(max_interrupt_work,
+				 "Driver maximum events handled per interrupt");
+MODULE_PARM_DESC(full_duplex,
+				 "Non-zero to set forced full duplex (depricated).");
+MODULE_PARM_DESC(rx_copybreak,
+				 "Breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+				 "Multicast addresses before switching to Rx-all-multicast");
+
+/*
+				Theory of Operation
+
+I. Board Compatibility
+
+This driver is for the Intel Gigabit Ethernet adapter.
+
+II. Board-specific settings
+
+III. Driver operation
+
+IIIa. Descriptor Rings
+
+This driver uses two statically allocated fixed-size descriptor arrays
+treated as rings by the hardware. The ring sizes are set at compile time
+by RX/TX_RING_SIZE.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver uses a zero-copy receive and transmit scheme.
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack.  Buffers consumed this way are replaced by newly allocated
+skbuffs in a later phase of receives.
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames.  New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets.  When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine.  Copying also preloads the cache, which is
+most useful with small frames.
+
+A subtle aspect of the operation is that the IP header at offset 14 in an
+ethernet frame isn't longword aligned for further processing.
+When unaligned buffers are permitted by the hardware (and always on copies)
+frames are put into the skbuff at an offset of "+2", 16-byte aligning
+the IP header.
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control.
+One is the send-packet routine which is single-threaded by the queue
+layer.  The other thread is the interrupt handler, which is single
+threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring.  At the
+start of a transmit attempt netif_pause_tx_queue(dev) is called.  If the
+transmit attempt fills the Tx queue controlled by the chip, the driver
+informs the software queue layer by not calling
+netif_unpause_tx_queue(dev) on exit.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IIId. SMP semantics
+
+The following are serialized with respect to each other via the "xmit_lock".
+  dev->hard_start_xmit()	Transmit a packet
+  dev->tx_timeout()			Transmit watchdog for stuck Tx
+  dev->set_multicast_list()	Set the receive filter.
+Note: The Tx timeout watchdog code is implemented by the timer routine in
+kernels up to 2.2.*.  In 2.4.* and later the timeout code is part of the
+driver interface.
+
+The following fall under the global kernel lock.  The module will not be
+unloaded during the call, unless a call with a potential reschedule e.g.
+kmalloc() is called.  No other synchronization assertion is made.
+  dev->open()
+  dev->do_ioctl()
+  dev->get_stats()
+Caution: The lock for dev->open() is commonly broken with request_irq() or
+kmalloc().  It is best to avoid any lock-breaking call in do_ioctl() and
+get_stats(), or additional module locking code must be implemented.
+
+The following is self-serialized (no simultaneous entry)
+  A handler registered with request_irq().
+
+IV. Notes
+
+IVb. References
+
+Intel has also released a Linux driver for this product, "e1000".
+
+IVc. Errata
+
+*/
+
+
+
+static void *igige_probe1(struct pci_dev *pdev, void *init_dev,
+						   long ioaddr, int irq, int chip_idx, int find_cnt);
+static int netdev_pwr_event(void *dev_instance, int event);
+enum chip_capability_flags { CanHaveMII=1, };
+#define PCI_IOTYPE ()
+
+static struct pci_id_info pci_id_tbl[] = {
+	{"Intel Gigabit Ethernet adapter", {0x10008086, 0xffffffff, },
+	 PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR0, 0x1ffff, 0},
+	{0,},						/* 0 terminated list. */
+};
+
+struct drv_id_info igige_drv_id = {
+	"intel-gige", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
+	igige_probe1, netdev_pwr_event };
+
+/* This hardware only has a PCI memory space BAR, not I/O space. */
+#ifdef USE_IO_OPS
+#error This driver only works with PCI memory space access.
+#endif
+
+/* Offsets to the device registers.
+*/
+enum register_offsets {
+	ChipCtrl=0x00, ChipStatus=0x08, EECtrl=0x10,
+	FlowCtrlAddrLo=0x028, FlowCtrlAddrHi=0x02c, FlowCtrlType=0x030,
+	VLANetherType=0x38,
+
+	RxAddrCAM=0x040,
+	IntrStatus=0x0C0,			/* Interrupt, Clear on Read, AKA ICR */
+	IntrEnable=0x0D0,			/* Set enable mask when '1' AKA IMS */
+	IntrDisable=0x0D8,			/* Clear enable mask when '1' */
+
+	RxControl=0x100,
+	RxQ0IntrDelay=0x108,		/* Rx list #0 interrupt delay timer. */
+	RxRingPtr=0x110,			/* Rx Desc. list #0 base address, 64bits */
+	RxRingLen=0x118,			/* Num bytes of Rx descriptors in ring.  */
+	RxDescHead=0x120,
+	RxDescTail=0x128,
+
+	RxQ1IntrDelay=0x130,		/* Rx list #1 interrupt delay timer. */
+	RxRing1Ptr=0x138,			/* Rx Desc. list #1 base address, 64bits */
+	RxRing1Len=0x140,			/* Num bytes of Rx descriptors in ring.  */
+	RxDesc1Head=0x148,
+	RxDesc1Tail=0x150,
+
+	FlowCtrlTimer=0x170, FlowCtrlThrshHi=0x160, FlowCtrlThrshLo=0x168, 
+	TxConfigReg=0x178,
+	RxConfigReg=0x180,
+	MulticastArray=0x200,
+
+	TxControl=0x400,
+	TxQState=0x408,				/* 64 bit queue state */
+	TxIPG=0x410,				/* Inter-Packet Gap */
+	TxRingPtr=0x420, TxRingLen=0x428,
+	TxDescHead=0x430, TxDescTail=0x438, TxIntrDelay=0x440,
+
+	RxCRCErrs=0x4000, RxMissed=0x4010,
+
+	TxStatus=0x408,
+	RxStatus=0x180,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+	IntrTxDone=0x0001,			/* Tx packet queued */
+	IntrLinkChange=0x0004,		/* Link Status Change */
+	IntrRxSErr=0x0008, 			/* Rx Symbol/Sequence error */
+	IntrRxEmpty=0x0010,			/* Rx queue 0 Empty */
+	IntrRxQ1Empty=0x0020,		/* Rx queue 1 Empty */
+	IntrRxDone=0x0080,			/* Rx Done, Queue 0 */
+	IntrRxDoneQ1=0x0100,		/* Rx Done, Queue 1 */
+	IntrPCIErr=0x0200,			/* PCI Bus Error */
+
+	IntrTxEmpty=0x0002,			/* Guess */
+	StatsMax=0x1000,			/* Unknown */
+};
+
+/* Bits in the RxFilterMode register. */
+enum rx_mode_bits {
+	RxCtrlReset=0x01, RxCtrlEnable=0x02, RxCtrlAllUnicast=0x08,
+	RxCtrlAllMulticast=0x10,
+	RxCtrlLoopback=0xC0,		/* We never configure loopback */
+	RxCtrlAcceptBroadcast=0x8000, 
+	/* Aliased names.*/
+	AcceptAllPhys=0x08,	AcceptAllMulticast=0x10, AcceptBroadcast=0x8000,
+	AcceptMyPhys=0,
+	AcceptMulticast=0,
+};
+
+/* The Rx and Tx buffer descriptors, as laid out for the chip. */
+struct rx_desc {
+	u32 buf_addr;				/* Low 32 bits of the buffer bus address. */
+	u32 buf_addr_hi;			/* High 32 bits (64-bit addressing). */
+	u32 csum_length;			/* Checksum and length */
+	u32 status;					/* Errors and status. */
+};
+
+struct tx_desc {
+	u32 buf_addr;				/* Low 32 bits of the buffer bus address. */
+	u32 buf_addr_hi;			/* High 32 bits (64-bit addressing). */
+	u32 cmd_length;				/* Command bits and packet length. */
+	u32 status;					/* And errors */
+};
+
+/* Bits in tx_desc.cmd_length */
+enum tx_cmd_bits {
+	TxDescEndPacket=0x02000000, TxCmdIntrDelay=0x80000000,
+	TxCmdAddCRC=0x02000000, TxCmdDoTx=0x13000000,	/* NOTE(review): AddCRC aliases EndPacket -- verify. */
+};
+enum tx_status_bits {
+	TxDescDone=0x0001, TxDescEndPkt=0x0002,
+};
+
+/* Bits in rx_desc.status */
+enum rx_status_bits {
+	RxDescDone=0x0001, RxDescEndPkt=0x0002,
+};
+
+
+#define PRIV_ALIGN	15 	/* Required alignment mask */
+/* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
+   within the structure. */
+struct netdev_private {
+	struct net_device *next_module;		/* Link for devices of this type. */
+	void *priv_addr;					/* Unaligned address for kfree */
+	const char *product_name;
+	/* The addresses of receive-in-place skbuffs. */
+	struct sk_buff* rx_skbuff[RX_RING_SIZE];
+	/* The saved address of a sent-in-place packet/buffer, for later free(). */
+	struct sk_buff* tx_skbuff[TX_RING_SIZE];
+	struct net_device_stats stats;
+	struct timer_list timer;	/* Media monitoring timer. */
+	/* Keep frequently used values adjacent for cache effect. */
+	int msg_level;
+	int chip_id, drv_flags;
+	struct pci_dev *pci_dev;
+	int max_interrupt_work;
+	int intr_enable;
+	long in_interrupt;			/* Word-long for SMP locks. */
+
+	struct rx_desc *rx_ring;
+	struct rx_desc *rx_head_desc;
+	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
+	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
+	int rx_copybreak;
+
+	struct tx_desc *tx_ring;
+	unsigned int cur_tx, dirty_tx;
+	unsigned int tx_full:1;				/* The Tx queue is full. */
+
+	unsigned int rx_mode;
+	unsigned int tx_config;
+	int multicast_filter_limit;
+	/* These values track the transceiver/media in use. */
+	unsigned int full_duplex:1;			/* Full-duplex operation requested. */
+	unsigned int duplex_lock:1;
+	unsigned int medialock:1;			/* Do not sense media. */
+	unsigned int default_port:4;		/* Last dev->if_port value. */
+};
+
+static int  eeprom_read(long ioaddr, int location);
+static int  netdev_open(struct net_device *dev);
+static int  change_mtu(struct net_device *dev, int new_mtu);
+static void check_duplex(struct net_device *dev);
+static void netdev_timer(unsigned long data);
+static void tx_timeout(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static int  start_tx(struct sk_buff *skb, struct net_device *dev);
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static void netdev_error(struct net_device *dev, int intr_status);
+static int  netdev_rx(struct net_device *dev);
+static void netdev_error(struct net_device *dev, int intr_status);
+static void set_rx_mode(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int  netdev_close(struct net_device *dev);
+
+
+
+/* A list of our installed devices, for removing the driver module. */
+static struct net_device *root_net_dev = NULL;
+
+#ifndef MODULE
+/* Static-kernel probe entry point.  You *must* rename this! */
+int skel_netdev_probe(struct net_device *dev)
+{
+	if (pci_drv_register(&igige_drv_id, dev) < 0)
+		return -ENODEV;
+	printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);	/* Announce only on success. */
+	return 0;
+}
+#endif
+
+static void *igige_probe1(struct pci_dev *pdev, void *init_dev,
+						   long ioaddr, int irq, int chip_idx, int card_idx)
+{
+	struct net_device *dev;
+	struct netdev_private *np;
+	void *priv_mem;
+	int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+
+	dev = init_etherdev(init_dev, 0);
+	if (!dev)
+		return NULL;
+
+	printk(KERN_INFO "%s: %s at 0x%lx, ",
+		   dev->name, pci_id_tbl[chip_idx].name, ioaddr);
+
+	for (i = 0; i < 3; i++)		/* Station address comes from EEPROM words 0-2. */
+		((u16*)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, i));
+	for (i = 0; i < 5; i++)
+		printk("%2.2x:", dev->dev_addr[i]);
+	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+	/* Make certain elements e.g. descriptor lists are aligned. */
+	priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
+	/* Check for the very unlikely case of no memory. */
+	if (priv_mem == NULL)
+		return NULL;	/* NOTE(review): 'dev' from init_etherdev is not released here -- verify. */
+
+	/* Do bogusness checks before this point.
+	   We do a request_region() only to register /proc/ioports info. */
+	request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);
+
+	/* Reset the chip to erase previous misconfiguration. */
+	writel(0x04000000, ioaddr + ChipCtrl);
+
+	dev->base_addr = ioaddr;
+	dev->irq = irq;
+
+	dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);	/* Round up to 16 bytes. */
+	memset(np, 0, sizeof(*np));
+	np->priv_addr = priv_mem;	/* Keep the unaligned pointer for kfree(). */
+
+	np->next_module = root_net_dev;	/* Push onto the module's device list. */
+	root_net_dev = dev;
+
+	np->pci_dev = pdev;
+	np->chip_id = chip_idx;
+	np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
+	np->msg_level = (1 << debug) - 1;	/* Debug level converted to a bitmask. */
+	np->rx_copybreak = rx_copybreak;
+	np->max_interrupt_work = max_interrupt_work;
+	np->multicast_filter_limit = multicast_filter_limit;
+
+	if (dev->mem_start)		/* dev->mem_start overrides the module 'options' value. */
+		option = dev->mem_start;
+
+	/* The lower four bits are the media type. */
+	if (option > 0) {
+		if (option & 0x2220)
+			np->full_duplex = 1;
+		np->default_port = option & 0x3330;
+		if (np->default_port)
+			np->medialock = 1;	/* Do not autosense media. */
+	}
+	if (card_idx < MAX_UNITS  &&  full_duplex[card_idx] > 0)
+		np->full_duplex = 1;
+
+	if (np->full_duplex)
+		np->duplex_lock = 1;	/* Forced full duplex locks the setting. */
+
+#if ! defined(final_version) /* Dump the EEPROM contents during development. */
+	if (np->msg_level & NETIF_MSG_MISC) {
+		int sum = 0;
+		for (i = 0; i < 0x40; i++) {
+			int eeval = eeprom_read(ioaddr, i);
+			printk("%4.4x%s", eeval, i % 16 != 15 ? " " : "\n");
+			sum += eeval;
+		}
+		printk(KERN_DEBUG "%s:  EEPROM checksum %4.4X (expected value 0xBABA).\n",
+			   dev->name, sum & 0xffff);
+	}
+#endif
+
+	/* The chip-specific entries in the device structure. */
+	dev->open = &netdev_open;
+	dev->hard_start_xmit = &start_tx;
+	dev->stop = &netdev_close;
+	dev->get_stats = &get_stats;
+	dev->set_multicast_list = &set_rx_mode;
+	dev->do_ioctl = &mii_ioctl;
+	dev->change_mtu = &change_mtu;
+
+	/* Turn off VLAN and clear the VLAN filter. */
+	writel(0x04000000, ioaddr + VLANetherType);
+	for (i = 0x600; i < 0x800; i+=4)
+		writel(0, ioaddr + i);
+	np->tx_config = 0x80000020;
+	writel(np->tx_config, ioaddr + TxConfigReg);
+	{
+		int eeword10 = eeprom_read(ioaddr, 10);	/* Extra control bits kept in EEPROM word 10. */
+		writel(((eeword10 & 0x01e0) << 17) | ((eeword10 & 0x0010) << 3),
+			   ioaddr + ChipCtrl);
+	}
+
+	return dev;
+}
+
+
+/* Read the EEPROM interface with serial bit streams generated by the
+   host processor. 
+   The example below is for the common 93c46 EEPROM, 64 16 bit words. */
+
+/* Delay between EEPROM clock transitions.
+   This effectively flushes the write cache to prevent quick double-writes.
+*/
+#define eeprom_delay(ee_addr)	readl(ee_addr)
+
+enum EEPROM_Ctrl_Bits {
+	EE_ShiftClk=0x01, EE_ChipSelect=0x02, EE_DataIn=0x08, EE_DataOut=0x04,
+};
+#define EE_Write0 (EE_ChipSelect)
+#define EE_Write1 (EE_ChipSelect | EE_DataOut)
+
+/* The EEPROM commands include the alway-set leading bit. */
+enum EEPROM_Cmds { EE_WriteCmd=5, EE_ReadCmd=6, EE_EraseCmd=7, };
+
+static int eeprom_read(long addr, int location)
+{
+	int i;
+	int retval = 0;
+	long ee_addr = addr + EECtrl;
+	int read_cmd = ((EE_ReadCmd<<6) | location) << 16 ;
+	int cmd_len = 2+6+16;	/* Bits shifted out, from bit cmd_len down to 0. */
+	u32 baseval = readl(ee_addr) & ~0x0f;	/* Preserve the non-EEPROM control bits. */
+
+	writel(EE_Write0 | baseval, ee_addr);	/* Assert chip select. */
+
+	/* Shift the read command bits out. */
+	for (i = cmd_len; i >= 0; i--) {
+		int dataval = baseval |
+			((read_cmd & (1 << i)) ? EE_Write1 : EE_Write0);
+		writel(dataval, ee_addr);
+		eeprom_delay(ee_addr);
+		writel(dataval | EE_ShiftClk, ee_addr);	/* Clock edge: shift one bit. */
+		eeprom_delay(ee_addr);
+		retval = (retval << 1) | ((readl(ee_addr) & EE_DataIn) ? 1 : 0);
+	}
+
+	/* Terminate the EEPROM access. */
+	writel(baseval | EE_Write0, ee_addr);
+	writel(baseval & ~EE_ChipSelect, ee_addr);	/* Drop chip select. */
+	return retval;
+}
+
+
+
+static int netdev_open(struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+
+	/* Some chips may need to be reset. */
+
+	MOD_INC_USE_COUNT;
+
+	if (np->tx_ring == 0)
+		np->tx_ring = (void *)get_free_page(GFP_KERNEL);
+	if (np->tx_ring == 0)
+		return -ENOMEM;
+	if (np->rx_ring == 0)
+		np->rx_ring = (void *)get_free_page(GFP_KERNEL);
+	if (np->tx_ring == 0) {
+		free_page((long)np->tx_ring);
+		return -ENOMEM;
+	}
+
+	/* Note that both request_irq() and init_ring() call kmalloc(), which
+	   break the global kernel lock protecting this routine. */
+	if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
+		MOD_DEC_USE_COUNT;
+		return -EAGAIN;
+	}
+
+	if (np->msg_level & NETIF_MSG_IFUP)
+		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
+			   dev->name, dev->irq);
+
+	init_ring(dev);
+
+	writel(0, ioaddr + RxControl);
+	writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
+#if ADDRLEN == 64
+	writel(virt_to_bus(np->rx_ring) >> 32, ioaddr + RxRingPtr + 4);
+#else
+	writel(0, ioaddr + RxRingPtr + 4);
+#endif
+
+	writel(RX_RING_SIZE * sizeof(struct rx_desc), ioaddr + RxRingLen);
+	writel(0x80000000 | rx_intr_holdoff, ioaddr + RxQ0IntrDelay);
+	writel(0, ioaddr + RxDescHead);
+	writel(np->dirty_rx + RX_RING_SIZE, ioaddr + RxDescTail);
+
+	/* Zero the unused Rx ring #1. */
+	writel(0, ioaddr + RxQ1IntrDelay);
+	writel(0, ioaddr + RxRing1Ptr);
+	writel(0, ioaddr + RxRing1Ptr + 4);
+	writel(0, ioaddr + RxRing1Len);
+	writel(0, ioaddr + RxDesc1Head);
+	writel(0, ioaddr + RxDesc1Tail);
+
+	/* Use 0x002000FA for half duplex. */
+	writel(0x000400FA, ioaddr + TxControl);
+
+	writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
+#if ADDRLEN == 64
+	writel(virt_to_bus(np->tx_ring) >> 32, ioaddr + TxRingPtr + 4);
+#else
+	writel(0, ioaddr + TxRingPtr + 4);
+#endif
+
+	writel(TX_RING_SIZE * sizeof(struct tx_desc), ioaddr + TxRingLen);
+	writel(0, ioaddr + TxDescHead);
+	writel(0, ioaddr + TxDescTail);
+	writel(0, ioaddr + TxQState);
+	writel(0, ioaddr + TxQState + 4);
+
+	/* Set IPG register with Ethernet standard values. */
+	writel(0x00A0080A, ioaddr + TxIPG);
+	/* The delay before announcing a Tx has completed. */
+	writel(tx_intr_holdoff, ioaddr + TxIntrDelay);
+
+	writel(((u32*)dev->dev_addr)[0], ioaddr + RxAddrCAM);
+	writel(0x80000000 | ((((u32*)dev->dev_addr)[1]) & 0xffff),
+		   ioaddr + RxAddrCAM + 4);
+
+	/* Initialize other registers. */
+	/* Configure the PCI bus bursts and FIFO thresholds. */
+
+	if (dev->if_port == 0)
+		dev->if_port = np->default_port;
+
+	np->in_interrupt = 0;
+
+	np->rx_mode = RxCtrlEnable;
+	set_rx_mode(dev);
+
+	/* Tx mode */
+	np->tx_config = 0x80000020;
+	writel(np->tx_config, ioaddr + TxConfigReg);
+
+	/* Flow control */
+	writel(0x00C28001, ioaddr + FlowCtrlAddrLo);
+	writel(0x00000100, ioaddr + FlowCtrlAddrHi);
+	writel(0x8808, ioaddr + FlowCtrlType);
+	writel(0x0100, ioaddr + FlowCtrlTimer);
+	writel(0x8000, ioaddr + FlowCtrlThrshHi);
+	writel(0x4000, ioaddr + FlowCtrlThrshLo);
+
+	netif_start_tx_queue(dev);
+
+	/* Enable interrupts by setting the interrupt mask. */
+	writel(IntrTxDone | IntrLinkChange | IntrRxDone | IntrPCIErr
+		   | IntrRxEmpty | IntrRxSErr, ioaddr + IntrEnable);
+
+	/*	writel(1, dev->base_addr + RxCmd);*/
+
+	if (np->msg_level & NETIF_MSG_IFUP)
+		printk(KERN_DEBUG "%s: Done netdev_open(), status: %x Rx %x Tx %x.\n",
+			   dev->name, (int)readl(ioaddr + ChipStatus),
+			   (int)readl(ioaddr + RxStatus), (int)readl(ioaddr + TxStatus));
+
+	/* Set the timer to check for link beat. */
+	init_timer(&np->timer);
+	np->timer.expires = jiffies + 3*HZ;
+	np->timer.data = (unsigned long)dev;
+	np->timer.function = &netdev_timer;				/* timer handler */
+	add_timer(&np->timer);
+
+	return 0;
+}
+
+/* Update for jumbo frames...
+   Changing the MTU while active is not allowed.
+ */
+static int change_mtu(struct net_device *dev, int new_mtu)
+{
+	if ((new_mtu < 68) || (new_mtu > 1500))
+		return -EINVAL;
+	if (netif_running(dev))
+		return -EBUSY;
+	dev->mtu = new_mtu;
+	return 0;
+}
+
/* React to a link/duplex change.  Currently informational only: it logs
   the control and Rx/Tx config registers when link messages are enabled.
   The chip-specific action to force a duplex setting is a placeholder. */
static void check_duplex(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int chip_ctrl = readl(ioaddr + ChipCtrl);
	int rx_cfg = readl(ioaddr + RxConfigReg);
	int tx_cfg = readl(ioaddr + TxConfigReg);
#if 0
	int chip_status = readl(ioaddr + ChipStatus);
#endif

	if (np->msg_level & NETIF_MSG_LINK)
		printk(KERN_DEBUG "%s:  Link changed status.  Ctrl %x rxcfg %8.8x "
			   "txcfg %8.8x.\n",
			   dev->name, chip_ctrl, rx_cfg, tx_cfg);
	if (np->medialock) {
		/* NOTE(review): stub -- a user-forced duplex setting is never
		   actually written to the hardware. */
		if (np->full_duplex)
			;
	}
	/* writew(new_tx_mode, ioaddr + TxMode); */
}
+
/* Periodic (10 second) timer: a transmit-hang watchdog plus a duplex
   recheck.  DATA is the net_device pointer installed in netdev_open(). */
static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int next_tick = 10*HZ;

	if (np->msg_level & NETIF_MSG_TIMER) {
		printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x, "
			   "Tx %x Rx %x.\n",
			   dev->name, (int)readl(ioaddr + ChipStatus),
			   (int)readl(ioaddr + TxStatus), (int)readl(ioaddr + RxStatus));
	}
	/* This will either have a small false-trigger window or will not catch
	   tbusy incorrectly set when the queue is empty. */
	if ((jiffies - dev->trans_start) > TX_TIMEOUT  &&
		(np->cur_tx - np->dirty_tx > 0  ||
		 netif_queue_paused(dev)) ) {
		tx_timeout(dev);
	}
	check_duplex(dev);
	/* Re-arm ourselves; the timer is deleted in netdev_close(). */
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}
+
/* Handle a transmit timeout: log the chip status and, when TX_ERR
   messages are enabled (skipped on Alpha), dump the Tx register window
   and both descriptor rings, then count the error.
   NOTE(review): recovery is not implemented -- the chip is not reset or
   restarted here; only the timestamp and statistics are updated. */
static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;

	printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
		   " resetting...\n", dev->name, (int)readl(ioaddr + ChipStatus));

#ifndef __alpha__
	if (np->msg_level & NETIF_MSG_TX_ERR) {
		int i;
		printk(KERN_DEBUG "  Tx registers: ");
		/* Dump the Tx register block at 0x400..0x443. */
		for (i = 0x400; i < 0x444; i += 8)
			printk(" %8.8x", (int)readl(ioaddr + i));
		printk("\n"KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
		printk("\n"KERN_DEBUG"  Tx ring %p: ", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" %4.4x", np->tx_ring[i].status);
		printk("\n");
	}
#endif

	/* Perhaps we should reinitialize the hardware here. */
	dev->if_port = 0;
	/* Stop and restart the chip's Tx processes . */

	/* Trigger an immediate transmit demand. */

	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	return;
}
+
+
/* Initialize the Rx and Tx rings, along with various 'dev' bits.
   Rx buffer allocation failures are tolerated; netdev_rx() retries. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	int i;

	np->tx_full = 0;
	np->cur_rx = np->cur_tx = 0;
	np->dirty_rx = np->dirty_tx = 0;

	/* Standard-sized buffers for MTU <= 1500, otherwise MTU plus 32
	   bytes of slack (presumably header/alignment room -- TODO confirm). */
	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	np->rx_head_desc = &np->rx_ring[0];

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_skbuff[i] = 0;
	}

	/* The number of ring descriptors is set by the ring length register,
	   thus the chip does not use 'next_desc' chains. */

	/* Fill in the Rx buffers.  Allocation failures are acceptable. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;			/* Mark as being used by this device. */
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		np->rx_ring[i].buf_addr = virt_to_le32desc(skb->tail);
		np->rx_ring[i].buf_addr_hi = 0;
		np->rx_ring[i].status = 0;
	}
	/* Encodes the allocation shortfall: zero when the ring filled
	   completely, "negative" (as unsigned) when it did not, so the
	   refill loop in netdev_rx() retries the missing buffers. */
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = 0;
		np->tx_ring[i].status = 0;
	}
	return;
}
+
/* Queue SKB for transmission: fill the next Tx descriptor and tell the
   chip by advancing the tail register.  Returns 0 on success, 1 when the
   queue is paused and the packet must be requeued by the stack.
   The queue-full bookkeeping is a deliberately lock-free pattern; do not
   reorder the statements. */
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	unsigned entry;

	/* Block a timer-based transmit from overlapping.  This happens when
	   packets are presumed lost, and we use this check the Tx status. */
	if (netif_pause_tx_queue(dev) != 0) {
		/* This watchdog code is redundant with the media monitor timer. */
		if (jiffies - dev->trans_start > TX_TIMEOUT)
			tx_timeout(dev);
		return 1;
	}

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_skbuff[entry] = skb;

	/* Note: Descriptors may be uncached.  Write each field only once. */
	np->tx_ring[entry].buf_addr = virt_to_le32desc(skb->data);
	np->tx_ring[entry].buf_addr_hi = 0;
	np->tx_ring[entry].cmd_length = cpu_to_le32(TxCmdDoTx | skb->len);
	np->tx_ring[entry].status = 0;

	/* Non-CC architectures: explicitly flush descriptor and packet.
	   cache_flush(np->tx_ring[entry], sizeof np->tx_ring[entry]);
	   cache_flush(skb->data, skb->len);
	*/

	np->cur_tx++;
	/* Queue-full check with a re-test to close the race against the
	   interrupt handler advancing dirty_tx concurrently. */
	if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
		np->tx_full = 1;
		/* Check for a just-cleared queue. */
		if (np->cur_tx - (volatile int)np->dirty_tx < TX_QUEUE_LEN - 2) {
			netif_unpause_tx_queue(dev);
			np->tx_full = 0;
		} else
			netif_stop_tx_queue(dev);
	} else
		netif_unpause_tx_queue(dev);		/* Typical path */

	/* Inform the chip we have another Tx. */
	if (np->msg_level & NETIF_MSG_TX_QUEUED)
		printk(KERN_DEBUG "%s: Tx queued to slot %d, desc tail now %d "
			   "writing %d.\n",
			   dev->name, entry, (int)readl(dev->base_addr + TxDescTail),
			   np->cur_tx % TX_RING_SIZE);
	writel(np->cur_tx % TX_RING_SIZE, dev->base_addr + TxDescTail);

	dev->trans_start = jiffies;

	if (np->msg_level & NETIF_MSG_TX_QUEUED) {
		printk(KERN_DEBUG "%s: Transmit frame #%d (%x) queued in slot %d.\n",
			   dev->name, np->cur_tx, (int)virt_to_bus(&np->tx_ring[entry]),
			   entry);
	}
	return 0;
}
+
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread.  Work is bounded by np->max_interrupt_work;
   abnormal events are delegated to netdev_error(). */
static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np;
	long ioaddr;
	int work_limit;

	ioaddr = dev->base_addr;
	np = (struct netdev_private *)dev->priv;
	work_limit = np->max_interrupt_work;

#if defined(__i386__)  &&  LINUX_VERSION_CODE < 0x020300
	/* A lock to prevent simultaneous entry bug on Intel SMP machines. */
	if (test_and_set_bit(0, (void*)&dev->interrupt)) {
		printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
			   dev->name);
		dev->interrupt = 0;	/* Avoid halting machine. */
		return;
	}
#endif

	do {
		u32 intr_status = readl(ioaddr + IntrStatus);

		if (np->msg_level & NETIF_MSG_INTR)
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		/* 0 means no pending work; all-ones means the card is gone. */
		if (intr_status == 0 || intr_status == 0xffffffff)
			break;

		if (intr_status & IntrRxDone)
			netdev_rx(dev);

		/* Reap completed Tx descriptors (nonzero status == done). */
		for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
			int entry = np->dirty_tx % TX_RING_SIZE;
			if (np->tx_ring[entry].status == 0)
				break;
			if (np->msg_level & NETIF_MSG_TX_DONE)
				printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
					   dev->name, np->tx_ring[entry].status);
			np->stats.tx_packets++;
#if LINUX_VERSION_CODE > 0x20127
			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
#endif
			/* Free the original skb. */
			dev_free_skb_irq(np->tx_skbuff[entry]);
			np->tx_skbuff[entry] = 0;
		}
		/* Note the 4 slot hysteresis to mark the queue non-full. */
		if (np->tx_full  &&  np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
			/* The ring is no longer full, allow new TX entries. */
			np->tx_full = 0;
			netif_resume_tx_queue(dev);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | IntrLinkChange | StatsMax))
			netdev_error(dev, intr_status);

		if (--work_limit < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
				   "status=0x%4.4x.\n",
				   dev->name, intr_status);
			break;
		}
	} while (1);

	if (np->msg_level & NETIF_MSG_INTR)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, (int)readl(ioaddr + IntrStatus));

#if defined(__i386__)  &&  LINUX_VERSION_CODE < 0x020300
	clear_bit(0, (void*)&dev->interrupt);
#endif
	return;
}
+
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation.
   Walks the Rx ring delivering completed frames: small frames (below
   rx_copybreak) are copied into a fresh minimally-sized skb so the large
   ring buffer can be reused; larger frames hand the ring skb itself to
   the stack.  Finally the ring is refilled with new buffers. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	int entry = np->cur_rx % RX_RING_SIZE;
	/* Bound the work to the number of outstanding ring slots. */
	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

	if (np->msg_level & NETIF_MSG_RX_STATUS) {
		printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
			   entry, np->rx_ring[entry].status);
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (np->rx_head_desc->status & cpu_to_le32(RxDescDone)) {
		struct rx_desc *desc = np->rx_head_desc;
		u32 desc_status = le32_to_cpu(desc->status);
		int data_size = le32_to_cpu(desc->csum_length);

		if (np->msg_level & NETIF_MSG_RX_STATUS)
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
				   desc_status);
		if (--boguscnt < 0)
			break;
		if ( ! (desc_status & RxDescEndPkt)) {
			/* A frame that did not fit in a single buffer. */
			printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
				   "multiple buffers, entry %#x length %d status %4.4x!\n",
				   dev->name, np->cur_rx, data_size, desc_status);
			np->stats.rx_length_errors++;
		} else {
			struct sk_buff *skb;
			/* Reported length should omit the CRC. */
			int pkt_len = (data_size & 0xffff) - 4;

#ifndef final_version
			if (np->msg_level & NETIF_MSG_RX_STATUS)
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
					   " of %d, bogus_cnt %d.\n",
					   pkt_len, data_size, boguscnt);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < np->rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
#if HAS_IP_COPYSUM			/* Call copy + cksum if available. */
				eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
					   pkt_len);
#endif
			} else {
				/* Pass the ring buffer itself up; the slot is refilled
				   in the loop below. */
				char *temp = skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
#ifndef final_version				/* Remove after testing. */
				if (le32desc_to_virt(np->rx_ring[entry].buf_addr) != temp)
					printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
						   "do not match in netdev_rx: %p vs. %p / %p.\n",
						   dev->name,
						   le32desc_to_virt(np->rx_ring[entry].buf_addr),
						   skb->head, temp);
#endif
			}
#ifndef final_version				/* Remove after testing. */
			/* You will want this info for the initial debug. */
			if (np->msg_level & NETIF_MSG_PKTDATA)
				printk(KERN_DEBUG "  Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
					   "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
					   "%d.%d.%d.%d.\n",
					   skb->data[0], skb->data[1], skb->data[2], skb->data[3],
					   skb->data[4], skb->data[5], skb->data[6], skb->data[7],
					   skb->data[8], skb->data[9], skb->data[10],
					   skb->data[11], skb->data[12], skb->data[13],
					   skb->data[14], skb->data[15], skb->data[16],
					   skb->data[17]);
#endif
			skb->protocol = eth_type_trans(skb, dev);
			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
			netif_rx(skb);
			dev->last_rx = jiffies;
			np->stats.rx_packets++;
#if LINUX_VERSION_CODE > 0x20127
			np->stats.rx_bytes += pkt_len;
#endif
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;				/* Better luck next round. */
			skb->dev = dev;			/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			np->rx_ring[entry].buf_addr = virt_to_le32desc(skb->tail);
		}
		/* Clearing status returns the descriptor to the chip. */
		np->rx_ring[entry].status = 0;
	}

	/* Restart Rx engine if stopped. */
	/* writel(1, dev->base_addr + RxCmd); */
	return 0;
}
+
/* Handle the uncommon interrupt causes: link changes, statistics-counter
   overflow and PCI bus faults.  Called from the interrupt handler. */
static void netdev_error(struct net_device *dev, int intr_status)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = (struct netdev_private *)dev->priv;

	if (intr_status & IntrLinkChange) {
		int chip_ctrl = readl(ioaddr + ChipCtrl);
		if (np->msg_level & NETIF_MSG_LINK)
			printk(KERN_ERR "%s: Link changed: Autonegotiation on-going.\n",
				   dev->name);
		/* NOTE(review): bit 0 of ChipCtrl is treated as link-up --
		   confirm against the chip documentation. */
		if (chip_ctrl & 1)
			netif_link_up(dev);
		else
			netif_link_down(dev);
		check_duplex(dev);
	}
	if (intr_status & StatsMax) {
		/* Fold the hardware counters into np->stats before they wrap. */
		get_stats(dev);
	}
	if ((intr_status & ~(IntrLinkChange|StatsMax))
		&& (np->msg_level & NETIF_MSG_DRV))
		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
			   dev->name, intr_status);
	/* Hmmmmm, it's not clear how to recover from PCI faults. */
	if (intr_status & IntrPCIErr)
		np->stats.tx_fifo_errors++;
}
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+	long ioaddr = dev->base_addr;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	int crc_errs = readl(ioaddr + RxCRCErrs);
+
+	if (crc_errs != 0xffffffff) {
+		/* We need not lock this segment of code for SMP.
+		   The non-atomic-add vulnerability is very small
+		   and statistics are non-critical. */
+		np->stats.rx_crc_errors	+= readl(ioaddr + RxCRCErrs);
+		np->stats.rx_missed_errors	+= readl(ioaddr + RxMissed);
+	}
+
+	return &np->stats;
+}
+
/* Little-endian AUTODIN II (IEEE 802.3) CRC-32, bit-serial form.
   Compact but slow -- intended for short inputs such as multicast
   addresses, not bulk data (use a table-driven routine for that).
   A big-endian variant also exists.  This is common code and should be
   moved to net/core/crc.c.  Chips may use the upper or lower CRC bits,
   possibly reversed and/or inverted; pick the endian-ness that needs the
   least post-processing.  Note: no final bit inversion is applied. */
static unsigned const ethernet_polynomial_le = 0xedb88320U;
static inline unsigned ether_crc_le(int length, unsigned char *data)
{
	unsigned int crc = 0xffffffff;	/* Initial value. */
	int byte_idx, bit_idx;

	for (byte_idx = 0; byte_idx < length; byte_idx++) {
		unsigned char octet = data[byte_idx];
		for (bit_idx = 0; bit_idx < 8; bit_idx++) {
			unsigned int mix = (crc ^ octet) & 1;
			crc >>= 1;
			if (mix)
				crc ^= ethernet_polynomial_le;
			octet >>= 1;
		}
	}
	return crc;
}
+
+static void set_rx_mode(struct net_device *dev)
+{
+	long ioaddr = dev->base_addr;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	u32 new_mc_filter[128];			/* Multicast filter table */
+	u32 new_rx_mode = np->rx_mode;
+
+	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
+		/* Unconditionally log net taps. */
+		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+		new_rx_mode |=
+			RxCtrlAcceptBroadcast | RxCtrlAllMulticast | RxCtrlAllUnicast;
+	} else if ((dev->mc_count > np->multicast_filter_limit)
+			   ||  (dev->flags & IFF_ALLMULTI)) {
+		/* Too many to match, or accept all multicasts. */
+		new_rx_mode &= ~RxCtrlAllUnicast;
+		new_rx_mode |= RxCtrlAcceptBroadcast | RxCtrlAllMulticast;
+	} else {
+		struct dev_mc_list *mclist;
+		int i;
+		memset(new_mc_filter, 0, sizeof(new_mc_filter));
+		for (i = 0, mclist = dev->mc_list; mclist && i < 15;
+			 i++, mclist = mclist->next) {
+			writel(((u32*)mclist->dmi_addr)[0], ioaddr + RxAddrCAM + 8 + i*8);
+			writel((((u32*)mclist->dmi_addr)[1] & 0xffff) | 0x80000000,
+				   ioaddr + RxAddrCAM + 12 + i*8);
+		}
+		for (; mclist && i < dev->mc_count; i++, mclist = mclist->next) {
+			set_bit(((u32*)mclist->dmi_addr)[1] & 0xfff,
+					new_mc_filter);
+		}
+		new_rx_mode &= ~RxCtrlAllUnicast | RxCtrlAllMulticast;
+		new_rx_mode |= RxCtrlAcceptBroadcast;
+		if (dev->mc_count > 15)
+			for (i = 0; i < 128; i++)
+				writel(new_mc_filter[i], ioaddr + MulticastArray + (i<<2));
+	}
+	if (np->rx_mode != new_rx_mode)
+		writel(np->rx_mode = new_rx_mode, ioaddr + RxControl);
+}
+
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	u32 *data32 = (void *)&rq->ifr_data;
+
+	switch(cmd) {
+	case SIOCGPARAMS:
+		data32[0] = np->msg_level;
+		data32[1] = np->multicast_filter_limit;
+		data32[2] = np->max_interrupt_work;
+		data32[3] = np->rx_copybreak;
+		return 0;
+	case SIOCSPARAMS:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		np->msg_level = data32[0];
+		np->multicast_filter_limit = data32[1];
+		np->max_interrupt_work = data32[2];
+		np->rx_copybreak = data32[3];
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int netdev_close(struct net_device *dev)
+{
+	long ioaddr = dev->base_addr;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	int i;
+
+	netif_stop_tx_queue(dev);
+
+	if (np->msg_level & NETIF_MSG_IFDOWN) {
+		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x "
+			   "Rx %4.4x Int %2.2x.\n",
+			   dev->name, (int)readl(ioaddr + TxStatus),
+			   (int)readl(ioaddr + RxStatus), (int)readl(ioaddr + IntrStatus));
+		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
+			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
+	}
+
+	/* Disable interrupts by clearing the interrupt mask. */
+	writel(~0, ioaddr + IntrDisable);
+	readl(ioaddr + IntrStatus);
+
+	/* Reset everything. */
+	writel(0x04000000, ioaddr + ChipCtrl);
+
+	del_timer(&np->timer);
+
+#ifdef __i386__
+	if (np->msg_level & NETIF_MSG_IFDOWN) {
+		printk("\n"KERN_DEBUG"  Tx ring at %8.8x:\n",
+			   (int)virt_to_bus(np->tx_ring));
+		for (i = 0; i < TX_RING_SIZE; i++)
+			printk(" #%d desc. buf %8.8x, length %8.8x, status %8.8x.\n",
+				   i, np->tx_ring[i].buf_addr, np->tx_ring[i].cmd_length,
+				   np->tx_ring[i].status);
+		printk("\n"KERN_DEBUG "  Rx ring %8.8x:\n",
+			   (int)virt_to_bus(np->rx_ring));
+		for (i = 0; i < RX_RING_SIZE; i++) {
+			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
+				   i, np->rx_ring[i].csum_length,
+				   np->rx_ring[i].status, np->rx_ring[i].buf_addr);
+			if (np->rx_ring[i].buf_addr) {
+				if (*(u8*)np->rx_skbuff[i]->tail != 0x69) {
+					u16 *pkt_buf = (void *)np->rx_skbuff[i]->tail;
+					int j;
+					for (j = 0; j < 0x50; j++)
+						printk(" %4.4x", pkt_buf[j]);
+					printk("\n");
+				}
+			}
+		}
+	}
+#endif /* __i386__ debugging only */
+
+	free_irq(dev->irq, dev);
+
+	/* Free all the skbuffs in the Rx queue. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		np->rx_ring[i].status = 0;
+		np->rx_ring[i].buf_addr = 0xBADF00D0; /* An invalid address. */
+		if (np->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+			np->rx_skbuff[i]->free = 1;
+#endif
+			dev_free_skb(np->rx_skbuff[i]);
+		}
+		np->rx_skbuff[i] = 0;
+	}
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		if (np->tx_skbuff[i])
+			dev_free_skb(np->tx_skbuff[i]);
+		np->tx_skbuff[i] = 0;
+	}
+
+	MOD_DEC_USE_COUNT;
+
+	return 0;
+}
+
/* Power-management / hot-eject callback from the CardBus shim.
   EVENT is one of DRV_ATTACH/DRV_SUSPEND/DRV_RESUME/DRV_DETACH.
   DRV_DETACH fully tears the device down: closes it if still up,
   unregisters it, releases its resources, unlinks it from the driver's
   device list and frees the private state.  Returns 0. */
static int netdev_pwr_event(void *dev_instance, int event)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;

	if (np->msg_level & NETIF_MSG_LINK)
		printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
	switch(event) {
	case DRV_ATTACH:
		MOD_INC_USE_COUNT;
		break;
	case DRV_SUSPEND:
		/* Disable interrupts, stop Tx and Rx. */
		writel(~0, ioaddr + IntrDisable);
		/* writel(2, ioaddr + RxCmd); */
		/* writew(2, ioaddr + TxCmd); */
		break;
	case DRV_RESUME:
		/* This is incomplete: the actions are very chip specific. */
		set_rx_mode(dev);
		break;
	case DRV_DETACH: {
		struct net_device **devp, **next;
		if (dev->flags & IFF_UP) {
			/* Some, but not all, kernel versions close automatically. */
			dev_close(dev);
			dev->flags &= ~(IFF_UP|IFF_RUNNING);
		}
		unregister_netdev(dev);
		release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
		iounmap((char *)dev->base_addr);
		/* Unlink 'dev' from the singly-linked module device list by
		   walking pointers-to-pointers from the list head. */
		for (devp = &root_net_dev; *devp; devp = next) {
			next = &((struct netdev_private *)(*devp)->priv)->next_module;
			if (*devp == dev) {
				*devp = *next;
				break;
			}
		}
		if (np->priv_addr)
			kfree(np->priv_addr);
		kfree(dev);
		MOD_DEC_USE_COUNT;
		break;
	}
	}

	return 0;
}
+
+
+#ifdef MODULE
/* Module load: print the version banner and register this driver with
   the PCI scan support; devices are attached via its callbacks. */
int init_module(void)
{
	/* Emit version even if no cards detected. */
	printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
	return pci_drv_register(&igige_drv_id, NULL);
}
+
+void cleanup_module(void)
+{
+	struct net_device *next_dev;
+
+	pci_drv_unregister(&igige_drv_id);
+
+	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+	while (root_net_dev) {
+		struct netdev_private *np = (void *)(root_net_dev->priv);
+		unregister_netdev(root_net_dev);
+		release_region(root_net_dev->base_addr,
+					   pci_id_tbl[np->chip_id].io_size);
+		iounmap((char *)(root_net_dev->base_addr));
+		next_dev = np->next_module;
+		if (np->tx_ring == 0)
+			free_page((long)np->tx_ring);
+		if (np->rx_ring == 0)
+			free_page((long)np->rx_ring);
+		if (np->priv_addr)
+			kfree(np->priv_addr);
+		kfree(root_net_dev);
+		root_net_dev = next_dev;
+	}
+}
+
+#endif  /* MODULE */
+
+/*
+ * Local variables:
+ *  compile-command: "make KERNVER=`uname -r` intel-gige.o"
+ *  compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c intel-gige.c"
+ *  simple-compile-command: "gcc -DMODULE -O6 -c intel-gige.c"
+ *  c-indent-level: 4
+ *  c-basic-offset: 4
+ *  tab-width: 4
+ * End:
+ */
diff -uNr net/drivers/net/kern_compat.h linux-2.4.20/drivers/net/kern_compat.h
--- net/drivers/net/kern_compat.h	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.4.20/drivers/net/kern_compat.h	2003-01-14 20:29:34.000000000 -0500
@@ -0,0 +1,284 @@
+#ifndef _KERN_COMPAT_H
+#define _KERN_COMPAT_H
+/* kern_compat.h: Linux PCI network adapter backward compatibility code. */
+/*
+	$Revision: 1.17 $ $Date: 2002/11/17 17:37:00 $
+
+	Kernel compatibility defines.
+	This file provides macros to mask the difference between kernel versions.
+	It is designed primarily to allow device drivers to be written so that
+	they work with a range of kernel versions.
+
+	Written 1999-2002 Donald Becker, Scyld Computing Corporation
+	This software may be used and distributed according to the terms
+	of the GNU General Public License (GPL), incorporated herein by
+	reference.  Drivers interacting with these functions are derivative
+	works and thus are covered the GPL.  They must include an explicit
+	GPL notice.
+
+	This code also provides inline scan and activate functions for PCI network
+	interfaces.  It has an interface identical to pci-scan.c, but is
+	intended as an include file to simplify using updated drivers with older
+	kernel versions.
+	This code version matches pci-scan.c:v0.05 9/16/99
+
+	The author may be reached as becker@scyld.com, or
+	Donald Becker
+	Scyld Computing Corporation
+	410 Severn Ave., Suite 210
+	Annapolis MD 21403
+
+	Other contributors:
+	<none>
+*/
+
+/* We try to use defined values to decide when an interface has changed or
+   added features, but we must have the kernel version number for a few. */
+#if ! defined(LINUX_VERSION_CODE)  ||  (LINUX_VERSION_CODE < 0x10000)
+#include <linux/version.h>
+#endif
+/* Older kernel versions didn't include modversions automatically. */
+#if LINUX_VERSION_CODE < 0x20300  &&  defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+
+/* There was no support for PCI address space mapping in 2.0, but the
+   Alpha needed it.  See the 2.2 documentation. */
+#if LINUX_VERSION_CODE < 0x20100  &&  ! defined(__alpha__)
+#define ioremap(a,b)\
+    (((unsigned long)(a) >= 0x100000) ? vremap(a,b) : (void*)(a))
+#define iounmap(v)\
+    do { if ((unsigned long)(v) >= 0x100000) vfree(v);} while (0)
+#endif
+
+/* Support for adding info about the purpose of and parameters for kernel
+   modules was added in 2.1. */
+#if LINUX_VERSION_CODE < 0x20115
+#define MODULE_AUTHOR(name)  extern int nonesuch
+#define MODULE_DESCRIPTION(string)  extern int nonesuch
+#define MODULE_PARM(varname, typestring)  extern int nonesuch
+#define MODULE_PARM_DESC(var,desc) extern int nonesuch
+#endif
+#if !defined(MODULE_LICENSE)
+#define MODULE_LICENSE(license) 	\
+static const char __module_license[] __attribute__((section(".modinfo"))) =   \
+"license=" license
+#endif
+#if !defined(MODULE_PARM_DESC)
+#define MODULE_PARM_DESC(var,desc)		\
+const char __module_parm_desc_##var[]		\
+__attribute__((section(".modinfo"))) =		\
+"parm_desc_" __MODULE_STRING(var) "=" desc
+#endif
+
+/* SMP and better multiarchitecture support were added.
+   Using an older kernel means we assume a little-endian uniprocessor.
+*/
+#if LINUX_VERSION_CODE < 0x20123
+#define hard_smp_processor_id() smp_processor_id()
+#define test_and_set_bit(val, addr) set_bit(val, addr)
+#define cpu_to_le16(val) (val)
+#define cpu_to_le32(val) (val)
+#define le16_to_cpu(val) (val)
+#define le16_to_cpus(val)		/* In-place conversion. */
+#define le32_to_cpu(val) (val)
+#define cpu_to_be16(val) ((((val) & 0xff) << 8) +  (((val) >> 8) & 0xff))
+#define cpu_to_be32(val) ((cpu_to_be16(val) << 16) + cpu_to_be16((val) >> 16))
+typedef long spinlock_t;
+#define SPIN_LOCK_UNLOCKED 0
+#define spin_lock(lock)
+#define spin_unlock(lock)
+#define spin_lock_irqsave(lock, flags)	do {save_flags(flags); cli();} while(0)
+#define spin_unlock_irqrestore(lock, flags) restore_flags(flags)
+#endif
+
+#if LINUX_VERSION_CODE <= 0x20139
+#define	net_device_stats enet_statistics
+#else
+#define NETSTATS_VER2
+#endif
+
+/* These are used by the netdrivers to report values from the
+   MII (Media Independent Interface) management registers.
+*/
+#ifndef SIOCGMIIPHY
+#define SIOCGMIIPHY (SIOCDEVPRIVATE)		/* Get the PHY in use. */
+#define SIOCGMIIREG (SIOCDEVPRIVATE+1) 		/* Read a PHY register. */
+#define SIOCSMIIREG (SIOCDEVPRIVATE+2) 		/* Write a PHY register. */
+#endif
+#ifndef SIOCGPARAMS
+#define SIOCGPARAMS (SIOCDEVPRIVATE+3) 		/* Read operational parameters. */
+#define SIOCSPARAMS (SIOCDEVPRIVATE+4) 		/* Set operational parameters. */
+#endif
+
+#if !defined(HAVE_NETIF_MSG)
+enum {
+	NETIF_MSG_DRV           = 0x0001,
+	NETIF_MSG_PROBE         = 0x0002,
+	NETIF_MSG_LINK          = 0x0004,
+	NETIF_MSG_TIMER         = 0x0008,
+	NETIF_MSG_IFDOWN        = 0x0010,
+	NETIF_MSG_IFUP          = 0x0020,
+	NETIF_MSG_RX_ERR        = 0x0040,
+	NETIF_MSG_TX_ERR        = 0x0080,
+	NETIF_MSG_TX_QUEUED     = 0x0100,
+	NETIF_MSG_INTR          = 0x0200,
+	NETIF_MSG_TX_DONE       = 0x0400,
+	NETIF_MSG_RX_STATUS     = 0x0800,
+	NETIF_MSG_PKTDATA       = 0x1000,
+	/* 2000 is reserved. */
+	NETIF_MSG_WOL           = 0x4000,
+	NETIF_MSG_MISC          = 0x8000,
+	NETIF_MSG_RXFILTER      = 0x10000,
+};
+#define NETIF_MSG_MAX 0x10000
+#endif
+
+#if !defined(NETIF_MSG_MAX) || NETIF_MSG_MAX < 0x8000
+#define NETIF_MSG_MISC 0x8000
+#endif
+#if !defined(NETIF_MSG_MAX) || NETIF_MSG_MAX < 0x10000
+#define NETIF_MSG_RXFILTER 0x10000
+#endif
+
+#if LINUX_VERSION_CODE < 0x20155
+#include <linux/bios32.h>
+#define PCI_SUPPORT_VER1
+/* A minimal version of the 2.2.* PCI support that handles configuration
+   space access.
+   Drivers that actually use pci_dev fields must do explicit compatibility.
+   Note that the struct pci_dev * "pointer" is actually a byte mapped integer!
+*/
+#if LINUX_VERSION_CODE < 0x20014
+struct pci_dev { int not_used; };
+#endif
+
+/* Fake a pci_dev "pointer" by packing bus and devfn into an integer; the
+   0xf0000 tag keeps the value non-NULL and recognizable when unpacked by
+   the bus_number()/devfn_number() accessors below. */
+#define pci_find_slot(bus, devfn) (struct pci_dev*)((bus<<8) | devfn | 0xf0000)
+#define bus_number(pci_dev) ((((int)(pci_dev))>>8) & 0xff)
+#define devfn_number(pci_dev) (((int)(pci_dev)) & 0xff)
+#define pci_bus_number(pci_dev) ((((int)(pci_dev))>>8) & 0xff)
+#define pci_devfn(pci_dev) (((int)(pci_dev)) & 0xff)
+
+#ifndef CONFIG_PCI
+extern inline int pci_present(void) { return 0; }
+#else
+#define pci_present pcibios_present
+#endif
+
+/* Map the 2.2+ config-space accessors onto the old pcibios_* calls,
+   unpacking bus/devfn from the encoded pci_dev "pointer". */
+#define pci_read_config_byte(pdev, where, valp)\
+	pcibios_read_config_byte(bus_number(pdev), devfn_number(pdev), where, valp)
+#define pci_read_config_word(pdev, where, valp)\
+	pcibios_read_config_word(bus_number(pdev), devfn_number(pdev), where, valp)
+#define pci_read_config_dword(pdev, where, valp)\
+	pcibios_read_config_dword(bus_number(pdev), devfn_number(pdev), where, valp)
+#define pci_write_config_byte(pdev, where, val)\
+	pcibios_write_config_byte(bus_number(pdev), devfn_number(pdev), where, val)
+#define pci_write_config_word(pdev, where, val)\
+	pcibios_write_config_word(bus_number(pdev), devfn_number(pdev), where, val)
+#define pci_write_config_dword(pdev, where, val)\
+	pcibios_write_config_dword(bus_number(pdev), devfn_number(pdev), where, val)
+#else
+/* 2.1.85+: real struct pci_dev exists; read the fields directly. */
+#define PCI_SUPPORT_VER2
+#define pci_bus_number(pci_dev) ((pci_dev)->bus->number)
+#define pci_devfn(pci_dev) ((pci_dev)->devfn)
+#endif
+
+/* The arg count changed, but function name did not.
+   We cover that bad choice by defining a new name.
+*/
+#if LINUX_VERSION_CODE < 0x20159
+#define dev_free_skb(skb) dev_kfree_skb(skb, FREE_WRITE)
+#define dev_free_skb_irq(skb) dev_kfree_skb(skb, FREE_WRITE)
+#elif LINUX_VERSION_CODE < 0x20400
+#define dev_free_skb(skb) dev_kfree_skb(skb)
+#define dev_free_skb_irq(skb) dev_kfree_skb(skb)
+#else
+/* 2.4+ has a distinct interrupt-safe variant; use it from irq context. */
+#define dev_free_skb(skb) dev_kfree_skb(skb)
+#define dev_free_skb_irq(skb) dev_kfree_skb_irq(skb)
+#endif
+
+/* Added at the suggestion of Jes Sorensen. */
+#if LINUX_VERSION_CODE > 0x20153
+#include <linux/init.h>
+#else
+/* Older kernels lack the init-section annotations; make them no-ops. */
+#define __init
+#define __initdata
+#define __initfunc(__arginit) __arginit
+#endif
+
+/* The old 'struct device' used a too-generic name. */
+#if LINUX_VERSION_CODE < 0x2030d
+#define net_device device
+#endif
+
+/* More changes for the 2.4 kernel, some in the zillion 2.3.99 releases. */
+#if LINUX_VERSION_CODE < 0x20363
+#define DECLARE_MUTEX(name) struct semaphore (name) = MUTEX;
+/* No reader/writer semaphores before 2.3.99: degrade to plain semaphores. */
+#define down_write(semaphore_p) down(semaphore_p)
+#define down_read(semaphore_p) down(semaphore_p)
+#define up_write(semaphore_p) up(semaphore_p)
+#define up_read(semaphore_p) up(semaphore_p)
+#define get_free_page get_zeroed_page
+/* Note that the kernel version has a broken time_before()! */
+#define time_after(a,b) ((long)(b) - (long)(a) < 0)
+#define time_before(a,b) ((long)(a) - (long)(b) < 0)
+#endif
+
+/* The 2.2 kernels added the start of capability-based security for operations
+   that formerly could only be done by root.
+*/
+#if ! defined(CAP_NET_ADMIN)
+#define capable(CAP_XXX) (suser())
+#endif
+
+/* Map the 2.4 netif_* queue/link API onto the pre-2.3 tbusy/start flags,
+   or onto the native calls when HAVE_NETIF_QUEUE exists. */
+#if ! defined(HAVE_NETIF_QUEUE)
+#define netif_wake_queue(dev)   do { clear_bit( 0, (void*)&(dev)->tbusy); mark_bh(NET_BH); } while (0)
+#define netif_start_tx_queue(dev) do { (dev)->tbusy = 0; dev->start = 1; } while (0)
+#define netif_stop_tx_queue(dev) do { (dev)->tbusy = 1; dev->start = 0; } while (0)
+#define netif_queue_paused(dev) ((dev)->tbusy != 0)
+/* Splitting these lines exposes a bug in some preprocessors. */
+#define netif_pause_tx_queue(dev) (test_and_set_bit( 0, (void*)&(dev)->tbusy))
+#define netif_unpause_tx_queue(dev) do { clear_bit( 0, (void*)&(dev)->tbusy); } while (0)
+#define netif_resume_tx_queue(dev) do { clear_bit( 0, (void*)&(dev)->tbusy); mark_bh(NET_BH); } while (0)
+
+#define netif_running(dev) ((dev)->start != 0)
+/* Hot-remove/power events do not exist here: detach/attach are no-ops. */
+#define netif_device_attach(dev) do {; } while (0)
+#define netif_device_detach(dev) do {; } while (0)
+#define netif_device_present(dev) (1)
+#define netif_set_tx_timeout(dev, func, deltajiffs)   do {; } while (0)
+#define netif_link_down(dev)  (dev)->flags &= ~IFF_RUNNING
+#define netif_link_up(dev)  (dev)->flags |= IFF_RUNNING
+
+#else
+
+#define netif_start_tx_queue(dev) netif_start_queue(dev)
+#define netif_stop_tx_queue(dev) netif_stop_queue(dev)
+#define netif_queue_paused(dev) netif_queue_stopped(dev)
+#define netif_resume_tx_queue(dev) netif_wake_queue(dev)
+/* Only used in transmit path.  No function in 2.4. */
+#define netif_pause_tx_queue(dev)  0
+#define netif_unpause_tx_queue(dev) do {; } while (0)
+
+#ifdef __LINK_STATE_NOCARRIER
+#define netif_link_down(dev)  netif_carrier_off(dev)
+#define netif_link_up(dev)  netif_carrier_on(dev)
+#else
+#define netif_link_down(dev)  (dev)->flags &= ~IFF_RUNNING
+#define netif_link_up(dev)  (dev)->flags |= IFF_RUNNING
+#endif
+
+#endif
+/* Without the PCI DMA mapping API, fall back to virt_to_bus() and no-ops. */
+#ifndef PCI_DMA_BUS_IS_PHYS
+#define pci_dma_sync_single(pci_dev, base_addr, extent, tofrom) do {; } while (0)
+#define pci_map_single(pci_dev, base_addr, extent, dir) virt_to_bus(base_addr)
+#define pci_unmap_single(pci_dev, base_addr, extent, dir) do {; } while (0)
+#endif
+
+#endif
+/*
+ * Local variables:
+ *  c-indent-level: 4
+ *  c-basic-offset: 4
+ *  tab-width: 4
+ * End:
+ */
diff -uNr net/drivers/net/myson803.c linux-2.4.20/drivers/net/myson803.c
--- net/drivers/net/myson803.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.4.20/drivers/net/myson803.c	2003-01-14 20:29:34.000000000 -0500
@@ -0,0 +1,1650 @@
+/* myson803.c: A Linux device driver for the Myson mtd803 Ethernet chip. */
+/*
+	Written 1998-2002 by Donald Becker.
+
+	This software may be used and distributed according to the terms of
+	the GNU General Public License (GPL), incorporated herein by reference.
+	Drivers based on or derived from this code fall under the GPL and must
+	retain the authorship, copyright and license notice.  This file is not
+	a complete program and may only be used when the entire operating
+	system is licensed under the GPL.
+
+	The author may be reached as becker@scyld.com, or C/O
+	Scyld Computing Corporation
+	410 Severn Ave., Suite 210
+	Annapolis MD 21403
+
+	Support information and updates available at
+	http://www.scyld.com/network/myson803.html
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"myson803.c:v1.04 11/17/2002 Written by Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+"  http://www.scyld.com/network/drivers.html\n";
+
+/* Automatically extracted configuration info:
+probe-func: myson803_probe
+config-in: tristate 'Myson MTD803 series Ethernet support' CONFIG_MYSON_ETHER
+
+c-help-name: Myson MTD803 PCI Ethernet support
+c-help-symbol: CONFIG_MYSON_ETHER
+c-help: This driver is for the Myson MTD803 Ethernet adapter series.
+c-help: More specific information and updates are available from 
+c-help: http://www.scyld.com/network/drivers.html
+*/
+
+/* The user-configurable values.
+   These may be modified when a driver module is loaded.*/
+
+/* Message enable level: 0..31 = no..all messages.  See NETIF_MSG docs. */
+static int debug = 2;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 40;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+   This chip uses a 64 element hash table based on the Ethernet CRC.  */
+static int multicast_filter_limit = 32;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+   Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak = 0;
+
+/* Used to pass the media type, etc.
+   Both 'options[]' and 'full_duplex[]' should exist for driver
+   interoperability.
+   The media type is usually passed in 'options[]'.
+    The default is autonegotiation for speed and duplex.
+	This should rarely be overridden.
+    Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
+    Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
+    Use option values 0x20 and 0x200 for forcing full duplex operation.
+*/
+#define MAX_UNITS 8		/* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+   Making the Tx ring too large decreases the effectiveness of channel
+   bonding and packet priority.
+   There are no ill effects from too-large receive rings. */
+#define TX_RING_SIZE	16
+#define TX_QUEUE_LEN	10		/* Limit Tx ring entries actually used.  */
+#define RX_RING_SIZE	32
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT  (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+   Do not change this value without good reason.  This is not a limit,
+   but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ		1536
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning  You must compile this file with the correct options!
+#warning  See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <asm/processor.h>		/* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/unaligned.h>
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+/* Condensed operations for readability. */
+#define virt_to_le32desc(addr)  cpu_to_le32(virt_to_bus(addr))
+#define le32desc_to_virt(addr)  bus_to_virt(le32_to_cpu(addr))
+
+#if (LINUX_VERSION_CODE >= 0x20100)  &&  defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+/* Kernels before 2.1.0 cannot map the high addrs assigned by some BIOSes. */
+#if (LINUX_VERSION_CODE < 0x20100)  ||  ! defined(MODULE)
+#define USE_IO_OPS
+#endif
+
+/* Module metadata and load-time parameters (shown by modinfo).
+   Fix: "depricated" -> "deprecated" in the full_duplex description. */
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Myson mtd803 Ethernet driver");
+MODULE_LICENSE("GPL");
+/* List in order of common use. */
+MODULE_PARM(debug, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM_DESC(debug, "Driver message level (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(full_duplex, "Non-zero to force full duplex, "
+				 "non-negotiated link (deprecated).");
+MODULE_PARM_DESC(max_interrupt_work,
+				 "Maximum events handled per interrupt");
+MODULE_PARM_DESC(rx_copybreak,
+				 "Breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+				 "Multicast addresses before switching to Rx-all-multicast");
+
+/*
+				Theory of Operation
+
+I. Board Compatibility
+
+This driver is for the Myson mtd803 chip.
+It should work with other Myson 800 series chips.
+
+II. Board-specific settings
+
+None.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
+Some chips explicitly use only 2^N sized rings, while others use a
+'next descriptor' pointer that the driver forms into rings.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver uses a zero-copy receive and transmit scheme.
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack.  Buffers consumed this way are replaced by newly allocated
+skbuffs in a later phase of receives.
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames.  New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets.  When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine.  Copying also preloads the cache, which is
+most useful with small frames.
+
+A subtle aspect of the operation is that the IP header at offset 14 in an
+ethernet frame isn't longword aligned for further processing.
+When unaligned buffers are permitted by the hardware (and always on copies)
+frames are put into the skbuff at an offset of "+2", 16-byte aligning
+the IP header.
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control.  One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag.  The other thread is the interrupt handler, which is single
+threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished otherwise it sets
+the 'lp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IIIe. SMP semantics
+
+The following are serialized with respect to each other via the "xmit_lock".
+  dev->hard_start_xmit()	Transmit a packet
+  dev->tx_timeout()			Transmit watchdog for stuck Tx
+  dev->set_multicast_list()	Set the receive filter.
+Note: The Tx timeout watchdog code is implemented by the timer routine in
+kernels up to 2.2.*.  In 2.4.* and later the timeout code is part of the
+driver interface.
+
+The following fall under the global kernel lock.  The module will not be
+unloaded during the call, unless a call with a potential reschedule e.g.
+kmalloc() is called.  No other synchronization assertion is made.
+  dev->open()
+  dev->do_ioctl()
+  dev->get_stats()
+Caution: The lock for dev->open() is commonly broken with request_irq() or
+kmalloc().  It is best to avoid any lock-breaking call in do_ioctl() and
+get_stats(), or additional module locking code must be implemented.
+
+The following is self-serialized (no simultaneous entry)
+  An handler registered with request_irq().
+
+IV. Notes
+
+IVb. References
+
+http://www.scyld.com/expert/100mbps.html
+http://scyld.com/expert/NWay.html
+http://www.myson.com.hk/mtd/datasheet/mtd803.pdf
+   Myson does not require a NDA to read the datasheet.
+
+IVc. Errata
+
+No undocumented errata.
+*/
+
+
+
+/* PCI probe routines. */
+
+static void *myson_probe1(struct pci_dev *pdev, void *init_dev,
+						   long ioaddr, int irq, int chip_idx, int find_cnt);
+static int netdev_pwr_event(void *dev_instance, int event);
+
+/* Chips prior to the 803 have an external MII transceiver. */
+enum chip_capability_flags { HasMIIXcvr=1, HasChipXcvr=2 };
+
+/* Resource flags and window size handed to the generic PCI scan code;
+   the choice of I/O vs. memory space is made at compile time. */
+#ifdef USE_IO_OPS
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO  | PCI_ADDR0)
+#define PCI_IOSIZE	256
+#else
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
+#define PCI_IOSIZE	1024
+#endif
+
+/* Table of supported chips: name, PCI vendor/device match, resource type,
+   window size and capability flags. */
+static struct pci_id_info pci_id_tbl[] = {
+	{"Myson mtd803 Fast Ethernet", {0x08031516, 0xffffffff, },
+	 PCI_IOTYPE, PCI_IOSIZE, HasChipXcvr},
+	{"Myson mtd891 Gigabit Ethernet", {0x08911516, 0xffffffff, },
+	 PCI_IOTYPE, PCI_IOSIZE, HasChipXcvr},
+	{0,},						/* 0 terminated list. */
+};
+
+/* Hook-up structure handed to pci-scan.c: probe and power-event entries. */
+struct drv_id_info myson803_drv_id = {
+	"myson803", 0, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl, myson_probe1,
+	netdev_pwr_event };
+
+/* This driver was written to use PCI memory space, however x86-oriented
+   hardware sometimes works only with I/O space accesses. */
+#ifdef USE_IO_OPS
+#undef readb
+#undef readw
+#undef readl
+#undef writeb
+#undef writew
+#undef writel
+#define readb inb
+#define readw inw
+#define readl inl
+#define writeb outb
+#define writew outw
+#define writel outl
+#endif
+
+/* Offsets to the various registers.
+   Most accesses must be longword aligned. */
+enum register_offsets {
+	StationAddr=0x00, MulticastFilter0=0x08, MulticastFilter1=0x0C,
+	FlowCtrlAddr=0x10, RxConfig=0x18, TxConfig=0x1a, PCIBusCfg=0x1c,
+	TxStartDemand=0x20, RxStartDemand=0x24,
+	RxCurrentPtr=0x28, TxRingPtr=0x2c, RxRingPtr=0x30,
+	IntrStatus=0x34, IntrEnable=0x38,
+	FlowCtrlThreshold=0x3c,
+	/* Note: the MII management and EEPROM interfaces share register 0x40. */
+	MIICtrl=0x40, EECtrl=0x40, RxErrCnts=0x44, TxErrCnts=0x48,
+	PHYMgmt=0x4c,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+	IntrRxErr=0x0002, IntrRxDone=0x0004, IntrTxDone=0x0008,
+	IntrTxEmpty=0x0010, IntrRxEmpty=0x0020, StatsMax=0x0040, RxEarly=0x0080,
+	TxEarly=0x0100, RxOverflow=0x0200, TxUnderrun=0x0400,
+	IntrPCIErr=0x2000, NWayDone=0x4000, LinkChange=0x8000,
+};
+
+/* Bits in the RxMode (np->txrx_config) register. */
+enum rx_mode_bits {
+	RxEnable=0x01, RxFilter=0xfe,
+	AcceptErr=0x02, AcceptRunt=0x08, AcceptBroadcast=0x40,
+	AcceptMulticast=0x20, AcceptAllPhys=0x80, AcceptMyPhys=0x00,
+	RxFlowCtrl=0x2000,
+	TxEnable=0x40000, TxModeFDX=0x00100000, TxThreshold=0x00e00000,
+};
+
+/* Misc. bits. */
+enum misc_bits {
+	BCR_Reset=1,				/* PCIBusCfg */
+	TxThresholdInc=0x200000,
+};
+
+/* The Rx and Tx buffer descriptors. */
+/* Note that using only 32 bit fields simplifies conversion to big-endian
+   architectures. */
+struct netdev_desc {
+	u32 status;					/* Ownership and completion/error bits. */
+	u32 ctrl_length;			/* Tx control bits and buffer length. */
+	u32 buf_addr;				/* Bus address of the data buffer. */
+	u32 next_desc;				/* Bus address of the next descriptor. */
+};
+
+/* Bits in network_desc.status */
+enum desc_status_bits {
+	DescOwn=0x80000000,
+	RxDescStartPacket=0x0800, RxDescEndPacket=0x0400, RxDescWholePkt=0x0c00,
+	RxDescErrSum=0x80, RxErrRunt=0x40, RxErrLong=0x20, RxErrFrame=0x10,
+	RxErrCRC=0x08, RxErrCode=0x04,
+	TxErrAbort=0x2000, TxErrCarrier=0x1000, TxErrLate=0x0800,
+	TxErr16Colls=0x0400, TxErrDefer=0x0200, TxErrHeartbeat=0x0100,
+	TxColls=0x00ff,
+};
+/* Bits in network_desc.ctrl_length */
+enum ctrl_length_bits {
+	TxIntrOnDone=0x80000000, TxIntrOnFIFO=0x40000000,
+	TxDescEndPacket=0x20000000, TxDescStartPacket=0x10000000,
+	TxAppendCRC=0x08000000, TxPadTo64=0x04000000, TxNormalPkt=0x3C000000,
+};
+
+#define PRIV_ALIGN	15 	/* Required alignment mask */
+/* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
+   within the structure. */
+/* Per-device driver state; allocated with PRIV_ALIGN slack and aligned by
+   hand in myson_probe1() so the descriptor rings start on a 16-byte
+   boundary. */
+struct netdev_private {
+	/* Descriptor rings first for alignment. */
+	struct netdev_desc rx_ring[RX_RING_SIZE];
+	struct netdev_desc tx_ring[TX_RING_SIZE];
+	struct net_device *next_module;		/* Link for devices of this type. */
+	void *priv_addr;					/* Unaligned address for kfree */
+	/* The addresses of receive-in-place skbuffs. */
+	struct sk_buff* rx_skbuff[RX_RING_SIZE];
+	/* The saved address of a sent-in-place packet/buffer, for later free(). */
+	struct sk_buff* tx_skbuff[TX_RING_SIZE];
+	struct net_device_stats stats;
+	struct timer_list timer;	/* Media monitoring timer. */
+	/* Frequently used values: keep some adjacent for cache effect. */
+	int msg_level;						/* NETIF_MSG_* bitmap from 'debug'. */
+	int max_interrupt_work;				/* Per-interrupt event budget. */
+	int intr_enable;					/* Mirror of the IntrEnable register. */
+	int chip_id, drv_flags;				/* pci_id_tbl index and capabilities. */
+	struct pci_dev *pci_dev;
+
+	struct netdev_desc *rx_head_desc;
+	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
+	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
+	int rx_copybreak;
+
+	unsigned int cur_tx, dirty_tx;
+	unsigned int tx_full:1;				/* The Tx queue is full. */
+	unsigned int rx_died:1;
+	unsigned int txrx_config;			/* Mirror of the RxConfig register. */
+
+	/* These values keep track of the transceiver/media in use. */
+	unsigned int full_duplex:1;			/* Full-duplex operation requested. */
+	unsigned int duplex_lock:1;
+	unsigned int medialock:1;			/* Do not sense media. */
+	unsigned int default_port:4;		/* Last dev->if_port value. */
+
+	unsigned int mcast_filter[2];		/* Mirror of the 64-bit hash filter. */
+	int multicast_filter_limit;
+
+	/* MII transceiver section. */
+	int mii_cnt;						/* MII device addresses. */
+	u16 advertising;					/* NWay media advertisement */
+	unsigned char phys[2];				/* MII device addresses. */
+};
+
+/* Forward declarations for the driver's internal routines.
+   Fix: netdev_error() was declared twice; the duplicate is removed. */
+static int  eeprom_read(long ioaddr, int location);
+static int  mdio_read(struct net_device *dev, int phy_id,
+					  unsigned int location);
+static void mdio_write(struct net_device *dev, int phy_id,
+					   unsigned int location, int value);
+static int  netdev_open(struct net_device *dev);
+static void check_duplex(struct net_device *dev);
+static void netdev_timer(unsigned long data);
+static void tx_timeout(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static int  start_tx(struct sk_buff *skb, struct net_device *dev);
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static void netdev_error(struct net_device *dev, int intr_status);
+static int  netdev_rx(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int  netdev_close(struct net_device *dev);
+
+
+
+/* A list of our installed devices, for removing the driver module. */
+static struct net_device *root_net_dev = NULL;
+
+#ifndef MODULE
+/* Probe entry for statically linked kernels: emit the version banner and
+   scan the PCI bus for supported chips.  Returns 0 or -ENODEV.
+   Fix: the banner is now printed before registration so it really is
+   emitted even when no cards are detected, as the comment intended;
+   previously the -ENODEV return made it unreachable in that case. */
+int myson803_probe(struct net_device *dev)
+{
+	if (debug >= NETIF_MSG_DRV)	/* Emit version even if no cards detected. */
+		printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+	if (pci_drv_register(&myson803_drv_id, dev) < 0)
+		return -ENODEV;
+	return 0;
+}
+#endif
+
+/* Per-device probe, called by the generic PCI scan code for each matching
+   device.  Registers a net_device, reads the station address from EEPROM
+   words 8..10, allocates aligned private state, detects transceivers and
+   fills in the device methods.  Returns the net_device, or NULL on
+   failure. */
+static void *myson_probe1(struct pci_dev *pdev, void *init_dev,
+						   long ioaddr, int irq, int chip_idx, int card_idx)
+{
+	struct net_device *dev;
+	struct netdev_private *np;
+	void *priv_mem;
+	int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+
+	dev = init_etherdev(init_dev, 0);
+	if (!dev)
+		return NULL;
+
+	printk(KERN_INFO "%s: %s at 0x%lx, ",
+		   dev->name, pci_id_tbl[chip_idx].name, ioaddr);
+
+	/* The station address is stored in EEPROM words 8..10. */
+	for (i = 0; i < 3; i++)
+		((u16 *)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, i + 8));
+	if (memcmp(dev->dev_addr, "\0\0\0\0\0", 6) == 0) {
+		printk(" (MISSING EEPROM ADDRESS)");
+		memcpy(dev->dev_addr, "\100Linux", 6);
+	}
+	for (i = 0; i < 5; i++)
+		printk("%2.2x:", dev->dev_addr[i]);
+	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+#if ! defined(final_version) /* Dump the EEPROM contents during development. */
+	if (debug > 4)
+		for (i = 0; i < 0x40; i++)
+			printk("%4.4x%s",
+				   eeprom_read(ioaddr, i), i % 16 != 15 ? " " : "\n");
+#endif
+
+	/* Make certain elements e.g. descriptor lists are aligned. */
+	priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
+	/* Check for the very unlikely case of no memory. */
+	/* NOTE(review): the net_device registered above is not unwound on this
+	   path, so a failure here leaks it -- confirm and add cleanup. */
+	if (priv_mem == NULL)
+		return NULL;
+
+	/* Do bogusness checks before this point.
+	   We do a request_region() only to register /proc/ioports info. */
+#ifdef USE_IO_OPS
+	request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);
+#endif
+
+	/* Reset the chip to erase previous misconfiguration. */
+	writel(BCR_Reset, ioaddr + PCIBusCfg);
+
+	dev->base_addr = ioaddr;
+	dev->irq = irq;
+
+	/* Round up to the next 16-byte boundary; priv_addr keeps the original
+	   pointer for kfree(). */
+	dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+	memset(np, 0, sizeof(*np));
+	np->priv_addr = priv_mem;
+
+	np->next_module = root_net_dev;
+	root_net_dev = dev;
+
+	np->pci_dev = pdev;
+	np->chip_id = chip_idx;
+	np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
+	/* Expand the debug level into a NETIF_MSG_* bitmap. */
+	np->msg_level = (1 << debug) - 1;
+	np->rx_copybreak = rx_copybreak;
+	np->max_interrupt_work = max_interrupt_work;
+	np->multicast_filter_limit = multicast_filter_limit;
+
+	/* A kernel boot-time media override takes precedence over options[]. */
+	if (dev->mem_start)
+		option = dev->mem_start;
+
+	/* The lower four bits are the media type. */
+	if (option > 0) {
+		if (option & 0x220)
+			np->full_duplex = 1;
+		np->default_port = option & 0x3ff;
+		if (np->default_port)
+			np->medialock = 1;
+	}
+	if (card_idx < MAX_UNITS  &&  full_duplex[card_idx] > 0)
+		np->full_duplex = 1;
+
+	if (np->full_duplex) {
+		if (np->msg_level & NETIF_MSG_PROBE)
+			printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
+				   " disabled.\n", dev->name);
+		np->duplex_lock = 1;
+	}
+
+	/* The chip-specific entries in the device structure. */
+	dev->open = &netdev_open;
+	dev->hard_start_xmit = &start_tx;
+	dev->stop = &netdev_close;
+	dev->get_stats = &get_stats;
+	dev->set_multicast_list = &set_rx_mode;
+	dev->do_ioctl = &mii_ioctl;
+
+	/* Scan MDIO addresses 0..31 for external transceivers (at most 4). */
+	if (np->drv_flags & HasMIIXcvr) {
+		int phy, phy_idx = 0;
+		for (phy = 0; phy < 32 && phy_idx < 4; phy++) {
+			int mii_status = mdio_read(dev, phy, 1);
+			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
+				np->phys[phy_idx++] = phy;
+				np->advertising = mdio_read(dev, phy, 4);
+				if (np->msg_level & NETIF_MSG_PROBE)
+					printk(KERN_INFO "%s: MII PHY found at address %d, status "
+						   "0x%4.4x advertising %4.4x.\n",
+						   dev->name, phy, mii_status, np->advertising);
+			}
+		}
+		np->mii_cnt = phy_idx;
+	}
+	/* Pseudo-address 32 selects the built-in transceiver (see mdio_read). */
+	if (np->drv_flags & HasChipXcvr) {
+		np->phys[np->mii_cnt++] = 32;
+		printk(KERN_INFO "%s: Internal PHY status 0x%4.4x"
+			   " advertising %4.4x.\n",
+			   dev->name, mdio_read(dev, 32, 1), mdio_read(dev, 32, 4));
+	}
+	/* Allow forcing the media type. */
+	if (np->default_port & 0x330) {
+		np->medialock = 1;
+		if (option & 0x220)
+			np->full_duplex = 1;
+		printk(KERN_INFO "  Forcing %dMbs %s-duplex operation.\n",
+			   (option & 0x300 ? 100 : 10),
+			   (np->full_duplex ? "full" : "half"));
+		if (np->mii_cnt)
+			mdio_write(dev, np->phys[0], 0,
+					   ((option & 0x300) ? 0x2000 : 0) | 	/* 100mbps? */
+					   (np->full_duplex ? 0x0100 : 0)); /* Full duplex? */
+	}
+
+	return dev;
+}
+
+
+/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.  These are
+   often serial bit streams generated by the host processor.
+   The example below is for the common 93c46 EEPROM, 64 16 bit words. */
+
+/* This "delay" forces out buffered PCI writes.
+   The udelay() is unreliable for timing, but some Myson NICs shipped with
+   absurdly slow EEPROMs.
+ */
+#define eeprom_delay(ee_addr)	readl(ee_addr); udelay(2); readl(ee_addr)
+
+/* EEPROM interface bits live in the upper half of the shared EECtrl
+   register. */
+enum EEPROM_Ctrl_Bits {
+	EE_ShiftClk=0x04<<16, EE_ChipSelect=0x88<<16,
+	EE_DataOut=0x02<<16, EE_DataIn=0x01<<16,
+	EE_Write0=0x88<<16, EE_Write1=0x8a<<16,
+};
+
+/* The EEPROM commands always start with 01.. preamble bits.
+   Commands are prepended to the variable-length address. */
+enum EEPROM_Cmds { EE_WriteCmd=5, EE_ReadCmd=6, EE_EraseCmd=7, };
+
+/* Bit-bang one 16-bit word out of the serial EEPROM at word index
+   'location'.  Returns the word read; the register write/read sequence is
+   the 93c46 read protocol: select, clock out the command+address MSB
+   first, then clock in 16 data bits. */
+static int eeprom_read(long addr, int location)
+{
+	long ee_addr = addr + EECtrl;
+	int read_cmd = location | (EE_ReadCmd<<6);
+	int bit;
+	int result = 0;
+
+	/* Assert chip select before clocking out the command. */
+	writel(EE_ChipSelect, ee_addr);
+
+	/* Clock out the 11 command/address bits, most significant first. */
+	for (bit = 10; bit >= 0; bit--) {
+		int outval = (read_cmd & (1 << bit)) ? EE_Write1 : EE_Write0;
+		writel(outval, ee_addr);
+		eeprom_delay(ee_addr);
+		writel(outval | EE_ShiftClk, ee_addr);
+		eeprom_delay(ee_addr);
+	}
+	writel(EE_ChipSelect, ee_addr);
+	eeprom_delay(ee_addr);
+
+	/* Clock in the 16 data bits, most significant first. */
+	for (bit = 16; bit > 0; bit--) {
+		writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
+		eeprom_delay(ee_addr);
+		result <<= 1;
+		if (readl(ee_addr) & EE_DataIn)
+			result |= 1;
+		writel(EE_ChipSelect, ee_addr);
+		eeprom_delay(ee_addr);
+	}
+
+	/* Terminate the EEPROM access. */
+	writel(EE_ChipSelect, ee_addr);
+	writel(0, ee_addr);
+	return result;
+}
+
+/*  MII transceiver control section.
+	Read and write the MII registers using software-generated serial
+	MDIO protocol.  See the MII specifications or DP83840A data sheet
+	for details.
+
+	The maximum data clock rate is 2.5 Mhz.
+	The timing is decoupled from the processor clock by flushing the write
+	from the CPU write buffer with a following read, and using PCI
+	transaction timing. */
+#define mdio_in(mdio_addr) readl(mdio_addr)
+#define mdio_out(value, mdio_addr) writel(value, mdio_addr)
+#define mdio_delay(mdio_addr) readl(mdio_addr)
+
+/* Set iff a MII transceiver on any interface requires mdio preamble.
+   This only set with older tranceivers, so the extra
+   code size of a per-interface flag is not worthwhile. */
+static char mii_preamble_required = 0;
+
+/* Bit positions of the software-driven MDIO lines in the MIICtrl register. */
+enum mii_reg_bits {
+	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
+};
+#define MDIO_EnbIn  (0)
+#define MDIO_WRITE0 (MDIO_EnbOutput)
+#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
+
+/* Generate the preamble required for initial synchronization and
+   a few older transceivers: clock out 32 consecutive logic-one bits on
+   the management data line. */
+static void mdio_sync(long mdio_addr)
+{
+	int i;
+
+	for (i = 0; i < 32; i++) {
+		mdio_out(MDIO_WRITE1, mdio_addr);
+		mdio_delay(mdio_addr);
+		mdio_out(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
+		mdio_delay(mdio_addr);
+	}
+}
+
+/* Read MII register 'location' from transceiver 'phy_id'.
+   phy_id >= 32 addresses the internal transceiver, whose registers are
+   memory-mapped in the PHYMgmt window rather than reached over serial
+   MDIO; extended registers 16/17/18 are folded into that window below.
+   Returns the 16-bit value, 0xffff for an out-of-range register, or 0 for
+   an unmapped internal register. */
+static int mdio_read(struct net_device *dev, int phy_id, unsigned int location)
+{
+	long ioaddr = dev->base_addr;
+	long mdio_addr = ioaddr + MIICtrl;
+	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+	int i, retval = 0;
+
+	if (location >= 32)
+		return 0xffff;
+	/* Internal transceiver: direct memory-mapped register access. */
+	if (phy_id >= 32) {
+		if (location < 6)
+			return readw(ioaddr + PHYMgmt + location*2);
+		else if (location == 16)
+			return readw(ioaddr + PHYMgmt + 6*2);
+		else if (location == 17)
+			return readw(ioaddr + PHYMgmt + 7*2);
+		else if (location == 18)
+			return readw(ioaddr + PHYMgmt + 10*2);
+		else
+			return 0;
+	}
+
+	if (mii_preamble_required)
+		mdio_sync(mdio_addr);
+
+	/* Shift the read command bits out. */
+	for (i = 15; i >= 0; i--) {
+		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+		mdio_out(dataval, mdio_addr);
+		mdio_delay(mdio_addr);
+		mdio_out(dataval | MDIO_ShiftClk, mdio_addr);
+		mdio_delay(mdio_addr);
+	}
+	/* Read the two transition, 16 data, and wire-idle bits. */
+	for (i = 19; i > 0; i--) {
+		mdio_out(MDIO_EnbIn, mdio_addr);
+		mdio_delay(mdio_addr);
+		retval = (retval << 1) | ((mdio_in(mdio_addr) & MDIO_Data) ? 1 : 0);
+		mdio_out(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+		mdio_delay(mdio_addr);
+	}
+	/* Drop the trailing idle bit and mask to 16 data bits. */
+	return (retval>>1) & 0xffff;
+}
+
+/* Write 'value' to MII register 'location' of transceiver 'phy_id'.
+   A write to the advertising register (4) of the primary transceiver is
+   shadowed in np->advertising.  phy_id 32 selects the internal,
+   memory-mapped transceiver.
+   NOTE(review): mdio_read() treats any phy_id >= 32 as internal but this
+   routine tests '== 32'; confirm whether higher pseudo-addresses are ever
+   used.  Register 18 is readable above but has no write mapping here --
+   presumably read-only. */
+static void mdio_write(struct net_device *dev, int phy_id,
+					   unsigned int location, int value)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	long mdio_addr = ioaddr + MIICtrl;
+	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
+	int i;
+
+	if (location == 4  &&  phy_id == np->phys[0])
+		np->advertising = value;
+	else if (location >= 32)
+		return;
+
+	/* Internal transceiver: direct memory-mapped register access. */
+	if (phy_id == 32) {
+		if (location < 6)
+			writew(value, ioaddr + PHYMgmt + location*2);
+		else if (location == 16)
+			writew(value, ioaddr + PHYMgmt + 6*2);
+		else if (location == 17)
+			writew(value, ioaddr + PHYMgmt + 7*2);
+		return;
+	}
+
+	if (mii_preamble_required)
+		mdio_sync(mdio_addr);
+
+	/* Shift the command bits out. */
+	for (i = 31; i >= 0; i--) {
+		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+		mdio_out(dataval, mdio_addr);
+		mdio_delay(mdio_addr);
+		mdio_out(dataval | MDIO_ShiftClk, mdio_addr);
+		mdio_delay(mdio_addr);
+	}
+	/* Clear out extra bits. */
+	for (i = 2; i > 0; i--) {
+		mdio_out(MDIO_EnbIn, mdio_addr);
+		mdio_delay(mdio_addr);
+		mdio_out(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+		mdio_delay(mdio_addr);
+	}
+	return;
+}
+
+
+/* Open the interface: acquire the IRQ, build the descriptor rings, program
+   the station and flow-control addresses, enable Tx/Rx and interrupts, and
+   start the media-monitoring timer.  Returns 0 or a negative errno. */
+static int netdev_open(struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+
+	/* Some chips may need to be reset. */
+
+	MOD_INC_USE_COUNT;
+
+	/* Clear any stale interrupt sources before requesting the IRQ. */
+	writel(~0, ioaddr + IntrStatus);
+
+	/* Note that both request_irq() and init_ring() call kmalloc(), which
+	   break the global kernel lock protecting this routine. */
+	if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
+		MOD_DEC_USE_COUNT;
+		return -EAGAIN;
+	}
+
+	if (np->msg_level & NETIF_MSG_IFUP)
+		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
+			   dev->name, dev->irq);
+
+	init_ring(dev);
+
+	writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
+	writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
+
+	/* Address register must be written as words.
+	   Fix: the endian conversion was previously applied twice
+	   (cpu_to_le32(cpu_to_le32(x))), which cancels itself on big-endian
+	   machines and left the station address unconverted there; apply it
+	   exactly once. */
+	writel(cpu_to_le32(get_unaligned((u32 *)dev->dev_addr)),
+		   ioaddr + StationAddr);
+	writel(cpu_to_le16(get_unaligned((u16 *)(dev->dev_addr+4))),
+		   ioaddr + StationAddr + 4);
+	/* Set the flow control address, 01:80:c2:00:00:01. */
+	writel(0x00c28001, ioaddr + FlowCtrlAddr);
+	writel(0x00000100, ioaddr + FlowCtrlAddr + 4);
+
+	/* Initialize other registers. */
+	/* Configure the PCI bus bursts and FIFO thresholds. */
+	writel(0x01f8, ioaddr + PCIBusCfg);
+
+	if (dev->if_port == 0)
+		dev->if_port = np->default_port;
+
+	np->txrx_config = TxEnable | RxEnable | RxFlowCtrl | 0x00600000;
+	np->mcast_filter[0] = np->mcast_filter[1] = 0;
+	np->rx_died = 0;
+	set_rx_mode(dev);
+	netif_start_tx_queue(dev);
+
+	/* Enable interrupts by setting the interrupt mask. */
+	np->intr_enable = IntrRxDone | IntrRxErr | IntrRxEmpty | IntrTxDone
+		| IntrTxEmpty | StatsMax | RxOverflow | TxUnderrun | IntrPCIErr
+		| NWayDone | LinkChange;
+	writel(np->intr_enable, ioaddr + IntrEnable);
+
+	if (np->msg_level & NETIF_MSG_IFUP)
+		printk(KERN_DEBUG "%s: Done netdev_open(), PHY status: %x %x.\n",
+			   dev->name, (int)readw(ioaddr + PHYMgmt),
+			   (int)readw(ioaddr + PHYMgmt + 2));
+
+	/* Set the timer to check for link beat. */
+	init_timer(&np->timer);
+	np->timer.expires = jiffies + 3*HZ;
+	np->timer.data = (unsigned long)dev;
+	np->timer.function = &netdev_timer;				/* timer handler */
+	add_timer(&np->timer);
+
+	return 0;
+}
+
+static void check_duplex(struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int new_tx_mode = np->txrx_config;
+
+	if (np->medialock) {
+	} else {
+		int mii_reg5 = mdio_read(dev, np->phys[0], 5);
+		int negotiated = mii_reg5 & np->advertising;
+		int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
+		if (np->duplex_lock  ||  mii_reg5 == 0xffff)
+			return;
+		if (duplex)
+			new_tx_mode |= TxModeFDX;
+		if (np->full_duplex != duplex) {
+			np->full_duplex = duplex;
+			if (np->msg_level & NETIF_MSG_LINK)
+				printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d"
+					   " negotiated capability %4.4x.\n", dev->name,
+					   duplex ? "full" : "half", np->phys[0], negotiated);
+		}
+	}
+	if (np->txrx_config != new_tx_mode)
+		writel(new_tx_mode, ioaddr + RxConfig);
+}
+
+/* Periodic (10 second) housekeeping: watchdog an apparently-stuck
+   transmit queue, restart a receiver that ran out of buffers, and track
+   duplex changes.  Re-arms itself. */
+static void netdev_timer(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int next_tick = 10*HZ;
+
+	if (np->msg_level & NETIF_MSG_TIMER) {
+		printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x.\n",
+			   dev->name, (int)readw(ioaddr + PHYMgmt + 10));
+	}
+	/* This will either have a small false-trigger window or will not catch
+	   tbusy incorrectly set when the queue is empty. */
+	if (netif_queue_paused(dev)  &&
+		np->cur_tx - np->dirty_tx > 1  &&
+		(jiffies - dev->trans_start) > TX_TIMEOUT) {
+		tx_timeout(dev);
+	}
+	/* It's dead Jim, no race condition.  Retry the Rx ring refill. */
+	if (np->rx_died)
+		netdev_rx(dev);
+	check_duplex(dev);
+	np->timer.expires = jiffies + next_tick;
+	add_timer(&np->timer);
+}
+
+/* Transmit watchdog.  Called when a transmit appears hung: log the chip
+   status (and optionally the rings), then stop the Tx engine, repoint it
+   at the oldest unacknowledged descriptor, restart it and kick it. */
+static void tx_timeout(struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+
+	printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
+		   " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));
+
+	if (np->msg_level & NETIF_MSG_TX_ERR) {
+		int i;
+		printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
+		for (i = 0; i < RX_RING_SIZE; i++)
+			printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
+		printk("\n"KERN_DEBUG"  Tx ring %p: ", np->tx_ring);
+		for (i = 0; i < TX_RING_SIZE; i++)
+			printk(" %8.8x", np->tx_ring[i].status);
+		printk("\n");
+	}
+
+	/* Stop and restart the chip's Tx processes . */
+	writel(np->txrx_config & ~TxEnable, ioaddr + RxConfig);
+	writel(virt_to_bus(np->tx_ring + (np->dirty_tx%TX_RING_SIZE)),
+		   ioaddr + TxRingPtr);
+	writel(np->txrx_config, ioaddr + RxConfig);
+	/* Trigger an immediate transmit demand. */
+	writel(0, dev->base_addr + TxStartDemand);
+
+	dev->trans_start = jiffies;
+	np->stats.tx_errors++;
+	return;
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits.
+   Both rings are linked into circular lists via next_desc.  Rx buffers
+   are allocated here, and only descriptors with a buffer are handed to
+   the chip (DescOwn); on partial allocation, dirty_rx records the
+   shortfall so netdev_rx() retries the refill later. */
+static void init_ring(struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	int i;
+
+	np->tx_full = 0;
+	np->cur_rx = np->cur_tx = 0;
+	np->dirty_rx = np->dirty_tx = 0;
+
+	/* Standard buffer size unless a jumbo MTU forces a larger one. */
+	np->rx_buf_sz = (dev->mtu <= 1532 ? PKT_BUF_SZ : dev->mtu + 4);
+	np->rx_head_desc = &np->rx_ring[0];
+
+	/* Initialize all Rx descriptors. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		np->rx_ring[i].ctrl_length = cpu_to_le32(np->rx_buf_sz);
+		np->rx_ring[i].status = 0;
+		np->rx_ring[i].next_desc = virt_to_le32desc(&np->rx_ring[i+1]);
+		np->rx_skbuff[i] = 0;
+	}
+	/* Mark the last entry as wrapping the ring. */
+	np->rx_ring[i-1].next_desc = virt_to_le32desc(&np->rx_ring[0]);
+
+	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+		np->rx_skbuff[i] = skb;
+		if (skb == NULL)
+			break;
+		skb->dev = dev;			/* Mark as being used by this device. */
+		np->rx_ring[i].buf_addr = virt_to_le32desc(skb->tail);
+		/* Hand the descriptor to the chip only once buf_addr is valid. */
+		np->rx_ring[i].status = cpu_to_le32(DescOwn);
+	}
+	/* Negative (mod 2^32) when some buffers are still unallocated. */
+	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		np->tx_skbuff[i] = 0;
+		np->tx_ring[i].status = 0;
+		np->tx_ring[i].next_desc = virt_to_le32desc(&np->tx_ring[i+1]);
+	}
+	np->tx_ring[i-1].next_desc = virt_to_le32desc(&np->tx_ring[0]);
+	return;
+}
+
+/* Queue SKB for transmission.  Fills the next Tx descriptor, passes
+   ownership to the chip and kicks the transmitter; pauses the queue when
+   the ring approaches TX_QUEUE_LEN outstanding entries.
+   Returns 0 when the packet is queued, 1 when the queue was busy. */
+static int start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	unsigned entry;
+
+	/* Block a timer-based transmit from overlapping.  This happens when
+	   packets are presumed lost, and we use this check the Tx status. */
+	if (netif_pause_tx_queue(dev) != 0) {
+		/* This watchdog code is redundant with the media monitor timer. */
+		if (jiffies - dev->trans_start > TX_TIMEOUT)
+			tx_timeout(dev);
+		return 1;
+	}
+
+	/* Note: Ordering is important here, set the field with the
+	   "ownership" bit last, and only then increment cur_tx. */
+
+	/* Calculate the next Tx descriptor entry. */
+	entry = np->cur_tx % TX_RING_SIZE;
+
+	np->tx_skbuff[entry] = skb;
+
+	np->tx_ring[entry].buf_addr = virt_to_le32desc(skb->data);
+	/* NOTE(review): the packet length appears in both bit fields of
+	   ctrl_length -- presumably buffer and frame length; confirm against
+	   the chip datasheet. */
+	np->tx_ring[entry].ctrl_length =
+		cpu_to_le32(TxIntrOnDone | TxNormalPkt | (skb->len << 11) | skb->len);
+	np->tx_ring[entry].status = cpu_to_le32(DescOwn);
+	np->cur_tx++;
+
+	/* On some architectures: explicitly flushing cache lines here speeds
+	   operation. */
+
+	if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
+		np->tx_full = 1;
+		/* Check for a just-cleared queue (the IRQ handler may have
+		   advanced dirty_tx concurrently, hence the volatile read). */
+		if (np->cur_tx - (volatile unsigned int)np->dirty_tx
+			< TX_QUEUE_LEN - 2) {
+			np->tx_full = 0;
+			netif_unpause_tx_queue(dev);
+		} else
+			netif_stop_tx_queue(dev);
+	} else
+		netif_unpause_tx_queue(dev);		/* Typical path */
+	/* Wake the potentially-idle transmit channel. */
+	writel(0, dev->base_addr + TxStartDemand);
+
+	dev->trans_start = jiffies;
+
+	if (np->msg_level & NETIF_MSG_TX_QUEUED) {
+		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
+			   dev->name, np->cur_tx, entry);
+	}
+	return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+   after the Tx thread.  Events are acknowledged first, then serviced;
+   the loop repeats until the status register reads zero or the
+   max_interrupt_work budget is exhausted. */
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+	struct net_device *dev = (struct net_device *)dev_instance;
+	struct netdev_private *np;
+	long ioaddr;
+	int boguscnt;				/* Event budget for this invocation. */
+
+#ifndef final_version			/* Can never occur. */
+	if (dev == NULL) {
+		printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown "
+				"device.\n", irq);
+		return;
+	}
+#endif
+
+	ioaddr = dev->base_addr;
+	np = (struct netdev_private *)dev->priv;
+	boguscnt = np->max_interrupt_work;
+
+#if defined(__i386__)  &&  LINUX_VERSION_CODE < 0x020300
+	/* A lock to prevent simultaneous entry bug on Intel SMP machines. */
+	if (test_and_set_bit(0, (void*)&dev->interrupt)) {
+		printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
+			   dev->name);
+		dev->interrupt = 0;	/* Avoid halting machine. */
+		return;
+	}
+#endif
+
+	do {
+		u32 intr_status = readl(ioaddr + IntrStatus);
+
+		/* Acknowledge all of the current interrupt sources ASAP. */
+		writel(intr_status, ioaddr + IntrStatus);
+
+		if (np->msg_level & NETIF_MSG_INTR)
+			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
+				   dev->name, intr_status);
+
+		if (intr_status == 0)
+			break;
+
+		if (intr_status & IntrRxDone)
+			netdev_rx(dev);
+
+		/* Reap completed Tx descriptors: tally statistics and free the
+		   skbs.  Stops at the first descriptor still owned by the chip. */
+		for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+			int entry = np->dirty_tx % TX_RING_SIZE;
+			int tx_status = le32_to_cpu(np->tx_ring[entry].status);
+			if (tx_status & DescOwn)
+				break;
+			if (np->msg_level & NETIF_MSG_TX_DONE)
+				printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
+					   dev->name, tx_status);
+			if (tx_status & (TxErrAbort | TxErrCarrier | TxErrLate
+							 | TxErr16Colls | TxErrHeartbeat)) {
+				if (np->msg_level & NETIF_MSG_TX_ERR)
+					printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+						   dev->name, tx_status);
+				np->stats.tx_errors++;
+				if (tx_status & TxErrCarrier) np->stats.tx_carrier_errors++;
+				if (tx_status & TxErrLate) np->stats.tx_window_errors++;
+				if (tx_status & TxErrHeartbeat) np->stats.tx_heartbeat_errors++;
+#ifdef ETHER_STATS
+				if (tx_status & TxErr16Colls) np->stats.collisions16++;
+				if (tx_status & TxErrAbort) np->stats.tx_aborted_errors++;
+#else
+				if (tx_status & (TxErr16Colls|TxErrAbort))
+					np->stats.tx_aborted_errors++;
+#endif
+			} else {
+				np->stats.tx_packets++;
+				np->stats.collisions += tx_status & TxColls;
+#if LINUX_VERSION_CODE > 0x20127
+				np->stats.tx_bytes += np->tx_skbuff[entry]->len;
+#endif
+#ifdef ETHER_STATS
+				if (tx_status & TxErrDefer) np->stats.tx_deferred++;
+#endif
+			}
+			/* Free the original skb. */
+			dev_free_skb_irq(np->tx_skbuff[entry]);
+			np->tx_skbuff[entry] = 0;
+		}
+		/* Note the 4 slot hysteresis to mark the queue non-full. */
+		if (np->tx_full  &&  np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+			/* The ring is no longer full, allow new TX entries. */
+			np->tx_full = 0;
+			netif_resume_tx_queue(dev);
+		}
+
+		/* Abnormal error summary/uncommon events handlers. */
+		if (intr_status & (IntrRxErr | IntrRxEmpty | StatsMax | RxOverflow
+						   | TxUnderrun | IntrPCIErr | NWayDone | LinkChange))
+			netdev_error(dev, intr_status);
+
+		if (--boguscnt < 0) {
+			printk(KERN_WARNING "%s: Too much work at interrupt, "
+				   "status=0x%4.4x.\n",
+				   dev->name, intr_status);
+			break;
+		}
+	} while (1);
+
+	if (np->msg_level & NETIF_MSG_INTR)
+		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+			   dev->name, (int)readl(ioaddr + IntrStatus));
+
+#if defined(__i386__)  &&  LINUX_VERSION_CODE < 0x020300
+	clear_bit(0, (void*)&dev->interrupt);
+#endif
+	return;
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+   for clarity and better register allocation.
+   Drains completed Rx descriptors, passing each frame to the network
+   stack (copying small frames per rx_copybreak), then refills the ring
+   and restarts the Rx engine.  Returns the number of buffers refilled. */
+static int netdev_rx(struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	int entry = np->cur_rx % RX_RING_SIZE;
+	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
+	int refilled = 0;
+
+	if (np->msg_level & NETIF_MSG_RX_STATUS) {
+		printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
+			   entry, np->rx_ring[entry].status);
+	}
+
+	/* If EOP is set on the next entry, it's a new packet. Send it up. */
+	while ( ! (np->rx_head_desc->status & cpu_to_le32(DescOwn))) {
+		struct netdev_desc *desc = np->rx_head_desc;
+		u32 desc_status = le32_to_cpu(desc->status);
+
+		if (np->msg_level & NETIF_MSG_RX_STATUS)
+			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
+				   desc_status);
+		if (--boguscnt < 0)
+			break;
+		if ((desc_status & RxDescWholePkt) != RxDescWholePkt) {
+			printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
+				   "multiple buffers, entry %#x length %d status %4.4x!\n",
+				   dev->name, np->cur_rx, desc_status >> 16, desc_status);
+			np->stats.rx_length_errors++;
+		} else if (desc_status & RxDescErrSum) {
+			/* There was a error. */
+			if (np->msg_level & NETIF_MSG_RX_ERR)
+				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
+					   desc_status);
+			np->stats.rx_errors++;
+			if (desc_status & (RxErrLong|RxErrRunt))
+				np->stats.rx_length_errors++;
+			if (desc_status & (RxErrFrame|RxErrCode))
+				np->stats.rx_frame_errors++;
+			if (desc_status & RxErrCRC)
+				np->stats.rx_crc_errors++;
+		} else {
+			struct sk_buff *skb;
+			/* Reported length should omit the CRC. */
+			u16 pkt_len = ((desc_status >> 16) & 0xfff) - 4;
+
+#ifndef final_version
+			if (np->msg_level & NETIF_MSG_RX_STATUS)
+				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
+					   " of %d, bogus_cnt %d.\n",
+					   pkt_len, pkt_len, boguscnt);
+#endif
+			/* Check if the packet is long enough to accept without copying
+			   to a minimally-sized skbuff. */
+			if (pkt_len < np->rx_copybreak
+				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+				skb->dev = dev;
+				skb_reserve(skb, 2);	/* 16 byte align the IP header */
+				eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+				skb_put(skb, pkt_len);
+			} else {
+				/* Hand the ring buffer itself to the stack (no copy);
+				   the refill loop below replaces it. */
+				skb_put(skb = np->rx_skbuff[entry], pkt_len);
+				np->rx_skbuff[entry] = NULL;
+			}
+#ifndef final_version				/* Remove after testing. */
+			/* You will want this info for the initial debug. */
+			if (np->msg_level & NETIF_MSG_PKTDATA)
+				printk(KERN_DEBUG "  Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
+					   "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
+					   "%d.%d.%d.%d.\n",
+					   skb->data[0], skb->data[1], skb->data[2], skb->data[3],
+					   skb->data[4], skb->data[5], skb->data[6], skb->data[7],
+					   skb->data[8], skb->data[9], skb->data[10],
+					   skb->data[11], skb->data[12], skb->data[13],
+					   skb->data[14], skb->data[15], skb->data[16],
+					   skb->data[17]);
+#endif
+			skb->mac.raw = skb->data;
+			/* Protocol lookup disabled until verified with all kernels. */
+			if (0 && ntohs(skb->mac.ethernet->h_proto) >= 0x0800) {
+				struct ethhdr *eth = skb->mac.ethernet;
+				skb->protocol = eth->h_proto;
+				if (desc_status & 0x1000) {
+					if ((dev->flags & IFF_PROMISC) &&
+						memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
+						skb->pkt_type = PACKET_OTHERHOST;
+				} else if (desc_status & 0x2000)
+					skb->pkt_type = PACKET_BROADCAST;
+				else if (desc_status & 0x4000)
+					skb->pkt_type = PACKET_MULTICAST;
+			} else
+				skb->protocol = eth_type_trans(skb, dev);
+			netif_rx(skb);
+			dev->last_rx = jiffies;
+			np->stats.rx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+			np->stats.rx_bytes += pkt_len;
+#endif
+		}
+		entry = (++np->cur_rx) % RX_RING_SIZE;
+		np->rx_head_desc = &np->rx_ring[entry];
+	}
+
+	/* Refill the Rx ring buffers: allocate replacement skbs for any
+	   consumed entries and return them to the chip (DescOwn last). */
+	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+		struct sk_buff *skb;
+		entry = np->dirty_rx % RX_RING_SIZE;
+		if (np->rx_skbuff[entry] == NULL) {
+			skb = dev_alloc_skb(np->rx_buf_sz);
+			np->rx_skbuff[entry] = skb;
+			if (skb == NULL)
+				break;				/* Better luck next round. */
+			skb->dev = dev;			/* Mark as being used by this device. */
+			np->rx_ring[entry].buf_addr = virt_to_le32desc(skb->tail);
+		}
+		np->rx_ring[entry].ctrl_length = cpu_to_le32(np->rx_buf_sz);
+		np->rx_ring[entry].status = cpu_to_le32(DescOwn);
+		refilled++;
+	}
+
+	/* Restart Rx engine if stopped. */
+	if (refilled) {				/* Perhaps  "&& np->rx_died" */
+		writel(0, dev->base_addr + RxStartDemand);
+		np->rx_died = 0;
+	}
+	return refilled;
+}
+
+/* Service the uncommon interrupt sources: link/negotiation changes,
+   Tx FIFO underrun (raise the Tx threshold), Rx starvation or overflow,
+   statistics-counter overflow, and PCI bus faults. */
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+
+	if (intr_status & (LinkChange | NWayDone)) {
+		if (np->msg_level & NETIF_MSG_LINK)
+			printk(KERN_NOTICE "%s: Link changed: Autonegotiation advertising"
+				   " %4.4x  partner %4.4x.\n", dev->name,
+				   mdio_read(dev, np->phys[0], 4),
+				   mdio_read(dev, np->phys[0], 5));
+		/* Clear sticky bit first. */
+		readw(ioaddr + PHYMgmt + 2);
+		if (readw(ioaddr + PHYMgmt + 2) & 0x0004)
+			netif_link_up(dev);
+		else
+			netif_link_down(dev);
+		check_duplex(dev);
+	}
+	/* Underrun: bump the Tx start threshold until it saturates. */
+	if ((intr_status & TxUnderrun)
+		&& (np->txrx_config & TxThreshold) != TxThreshold) {
+		np->txrx_config += TxThresholdInc;
+		writel(np->txrx_config, ioaddr + RxConfig);
+		np->stats.tx_fifo_errors++;
+	}
+	if (intr_status & IntrRxEmpty) {
+		printk(KERN_WARNING "%s: Out of receive buffers: no free memory.\n",
+			   dev->name);
+		/* Refill Rx descriptors */
+		np->rx_died = 1;
+		netdev_rx(dev);
+	}
+	if (intr_status & RxOverflow) {
+		printk(KERN_WARNING "%s: Receiver overflow.\n", dev->name);
+		np->stats.rx_over_errors++;
+		netdev_rx(dev);			/* Refill Rx descriptors */
+		get_stats(dev);			/* Empty dropped counter. */
+	}
+	if (intr_status & StatsMax) {
+		get_stats(dev);
+	}
+	/* Anything not handled above is unexpected -- log it. */
+	if ((intr_status & ~(LinkChange|NWayDone|StatsMax|TxUnderrun|RxOverflow
+						 |TxEarly|RxEarly))
+		&& (np->msg_level & NETIF_MSG_DRV))
+		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
+			   dev->name, intr_status);
+	/* Hmmmmm, it's not clear how to recover from PCI faults. */
+	if (intr_status & IntrPCIErr) {
+		const char *const pcierr[4] =
+		{ "Parity Error", "Master Abort", "Target Abort", "Unknown Error" };
+		if (np->msg_level & NETIF_MSG_DRV)
+			printk(KERN_WARNING "%s: PCI Bus %s, %x.\n",
+				   dev->name, pcierr[(intr_status>>11) & 3], intr_status);
+	}
+}
+
+/* We do not bother to spinlock statistics.
+   A window only exists if we have non-atomic adds, the error counts are
+   typically zero, and statistics are non-critical. */ 
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+	long ioaddr = dev->base_addr;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	unsigned int rxerrs = readl(ioaddr + RxErrCnts);
+	unsigned int txerrs = readl(ioaddr + TxErrCnts);
+
+	/* The chip only need report frames silently dropped. */
+	np->stats.rx_crc_errors	+= rxerrs >> 16;
+	np->stats.rx_missed_errors	+= rxerrs & 0xffff;
+
+	/* These stats are required when the descriptor is closed before Tx. */
+	np->stats.tx_aborted_errors += txerrs >> 24;
+	np->stats.tx_window_errors += (txerrs >> 16) & 0xff;
+	np->stats.collisions += txerrs & 0xffff;
+
+	return &np->stats;
+}
+
/* Big-endian AUTODIN II ethernet CRC calculations.
   This is slow but compact code.  Do not use this routine for bulk data,
   use a table-based routine instead.
   This is common code and may be in the kernel with Linux 2.5+.
*/
static unsigned const ethernet_polynomial = 0x04c11db7U;
/* Return the big-endian (MSB-first shift, LSB-first bit order within
   each octet) CRC-32 of the LENGTH bytes at DATA, starting from ~0 with
   no final inversion.  set_rx_mode() uses bits 31..26 of the result as
   the multicast hash-filter index. */
static inline u32 ether_crc(int length, unsigned char *data)
{
	u32 crc = ~0;

	while(--length >= 0) {
		unsigned char current_octet = *data++;
		int bit;
		for (bit = 0; bit < 8; bit++, current_octet >>= 1)
			/* Feed the polynomial back when the CRC high bit differs
			   from the incoming data bit.  The previous test was
			   "(crc < 0)", which is always false for the unsigned u32
			   crc and so computed wrong hash values; test bit 31
			   explicitly instead. */
			crc = (crc << 1) ^
				((((crc >> 31) ^ current_octet) & 1) ? ethernet_polynomial : 0);
	}
	return crc;
}
+
+/* Program the chip's Rx filter mode and 64-bit multicast hash filter to
+   match dev->flags and the current multicast list.  The hash index is
+   the top 6 bits of the big-endian CRC of each address.  Hardware writes
+   are skipped when the cached values are already correct. */
+static void set_rx_mode(struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	u32 mc_filter[2];			/* Multicast hash filter */
+	u32 rx_mode;
+
+	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
+		/* Unconditionally log net taps. */
+		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+		mc_filter[1] = mc_filter[0] = ~0;
+		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAllPhys
+			| AcceptMyPhys;
+	} else if ((dev->mc_count > np->multicast_filter_limit)
+			   ||  (dev->flags & IFF_ALLMULTI)) {
+		/* Too many to match, or accept all multicasts. */
+		mc_filter[1] = mc_filter[0] = ~0;
+		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+	} else {
+		struct dev_mc_list *mclist;
+		int i;
+		mc_filter[1] = mc_filter[0] = 0;
+		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+			 i++, mclist = mclist->next) {
+			set_bit((ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) & 0x3f,
+					mc_filter);
+		}
+		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+	}
+	/* Only touch the hash registers when the filter changed. */
+	if (mc_filter[0] != np->mcast_filter[0]  ||
+		mc_filter[1] != np->mcast_filter[1]) {
+		writel(mc_filter[0], ioaddr + MulticastFilter0);
+		writel(mc_filter[1], ioaddr + MulticastFilter1);
+		np->mcast_filter[0] = mc_filter[0];
+		np->mcast_filter[1] = mc_filter[1];
+	}
+	/* Merge the mode bits into txrx_config and write if changed. */
+	if ((np->txrx_config & RxFilter) != rx_mode) {
+		np->txrx_config &= ~RxFilter;
+		np->txrx_config |= rx_mode;
+		writel(np->txrx_config, ioaddr + RxConfig);
+	}
+}
+
+/*
+  Handle user-level ioctl() calls.
+  We must use two numeric constants as the key because some clueless person
+  changed the value for the symbolic name.
+  The numeric pairs are the old and new values of SIOCGMIIPHY, SIOCGMIIREG
+  and SIOCSMIIREG; SIOCGPARAMS/SIOCSPARAMS are driver-private tunables.
+*/
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	u16 *data = (u16 *)&rq->ifr_data;	/* MII ioctls: id/reg/value. */
+	u32 *data32 = (void *)&rq->ifr_data;	/* Driver-private ioctls. */
+
+	switch(cmd) {
+	case 0x8947: case 0x89F0:
+		/* SIOCGMIIPHY: Get the address of the PHY in use. */
+		data[0] = np->phys[0];
+		/* Fall Through */
+	case 0x8948: case 0x89F1:
+		/* SIOCGMIIREG: Read the specified MII register. */
+		data[3] = mdio_read(dev, data[0], data[1]);
+		return 0;
+	case 0x8949: case 0x89F2:
+		/* SIOCSMIIREG: Write the specified MII register */
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		/* Track writes to our own PHY so driver state stays in sync. */
+		if (data[0] == np->phys[0]) {
+			u16 value = data[2];
+			switch (data[1]) {
+			case 0:
+				/* Check for autonegotiation on or reset. */
+				np->medialock = (value & 0x9000) ? 0 : 1;
+				if (np->medialock)
+					np->full_duplex = (value & 0x0100) ? 1 : 0;
+				break;
+			case 4: np->advertising = value; break;
+			}
+			/* Perhaps check_duplex(dev), depending on chip semantics. */
+		}
+		mdio_write(dev, data[0], data[1], data[2]);
+		return 0;
+	case SIOCGPARAMS:
+		/* Report the run-time tunables. */
+		data32[0] = np->msg_level;
+		data32[1] = np->multicast_filter_limit;
+		data32[2] = np->max_interrupt_work;
+		data32[3] = np->rx_copybreak;
+		return 0;
+	case SIOCSPARAMS:
+		/* Set the run-time tunables (privileged). */
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		np->msg_level = data32[0];
+		np->multicast_filter_limit = data32[1];
+		np->max_interrupt_work = data32[2];
+		np->rx_copybreak = data32[3];
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/* Stop the interface: halt the Tx queue, mask interrupts, stop the chip's
+   Tx/Rx engines, kill the media timer, release the IRQ and free every
+   ring buffer.  Counterpart of netdev_open(). */
+static int netdev_close(struct net_device *dev)
+{
+	long ioaddr = dev->base_addr;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	int i;
+
+	netif_stop_tx_queue(dev);
+
+	if (np->msg_level & NETIF_MSG_IFDOWN) {
+		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x.\n",
+			   dev->name, (int)readl(ioaddr + RxConfig));
+		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
+			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
+	}
+
+	/* Disable interrupts by clearing the interrupt mask. */
+	writel(0x0000, ioaddr + IntrEnable);
+
+	/* Stop the chip's Tx and Rx processes. */
+	np->txrx_config = 0;
+	writel(0, ioaddr + RxConfig);
+
+	del_timer(&np->timer);
+
+#ifdef __i386__
+	/* Debug-only dump of both rings before teardown. */
+	if (np->msg_level & NETIF_MSG_IFDOWN) {
+		printk("\n"KERN_DEBUG"  Tx ring at %8.8x:\n",
+			   (int)virt_to_bus(np->tx_ring));
+		for (i = 0; i < TX_RING_SIZE; i++)
+			printk(" #%d desc. %x %x %8.8x.\n",
+				   i, np->tx_ring[i].status, np->tx_ring[i].ctrl_length,
+				   np->tx_ring[i].buf_addr);
+		printk("\n"KERN_DEBUG "  Rx ring %8.8x:\n",
+			   (int)virt_to_bus(np->rx_ring));
+		for (i = 0; i < RX_RING_SIZE; i++) {
+			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
+				   i, np->rx_ring[i].status, np->rx_ring[i].ctrl_length,
+				   np->rx_ring[i].buf_addr);
+		}
+	}
+#endif /* __i386__ debugging only */
+
+	free_irq(dev->irq, dev);
+
+	/* Free all the skbuffs in the Rx queue. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		np->rx_ring[i].status = 0;
+		np->rx_ring[i].buf_addr = 0xBADF00D0; /* An invalid address. */
+		if (np->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+			np->rx_skbuff[i]->free = 1;
+#endif
+			dev_free_skb(np->rx_skbuff[i]);
+		}
+		np->rx_skbuff[i] = 0;
+	}
+	/* Free any skbs still waiting in the Tx ring. */
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		if (np->tx_skbuff[i])
+			dev_free_skb(np->tx_skbuff[i]);
+		np->tx_skbuff[i] = 0;
+	}
+
+	MOD_DEC_USE_COUNT;
+
+	return 0;
+}
+
+/* Power-management / hot-plug callback: handles driver attach/detach and
+   suspend/resume events from the CardBus/ACPI activation layer.
+   DRV_DETACH tears the device down completely and frees it. */
+static int netdev_pwr_event(void *dev_instance, int event)
+{
+	struct net_device *dev = dev_instance;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+
+	if (np->msg_level & NETIF_MSG_LINK)
+		printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
+	switch(event) {
+	case DRV_ATTACH:
+		MOD_INC_USE_COUNT;
+		break;
+	case DRV_SUSPEND:
+		/* Disable interrupts, stop Tx and Rx. */
+		writel(0, ioaddr + IntrEnable);
+		writel(0, ioaddr + RxConfig);
+		break;
+	case DRV_RESUME:
+		/* This is incomplete: the actions are very chip specific. */
+		set_rx_mode(dev);
+		writel(np->intr_enable, ioaddr + IntrEnable);
+		break;
+	case DRV_DETACH: {
+		struct net_device **devp, **next;
+		if (dev->flags & IFF_UP) {
+			/* Some, but not all, kernel versions close automatically. */
+			dev_close(dev);
+			dev->flags &= ~(IFF_UP|IFF_RUNNING);
+		}
+		unregister_netdev(dev);
+		/* NOTE(review): release_region() is unconditional here, while
+		   cleanup_module() pairs it with #ifdef USE_IO_OPS -- confirm
+		   which resource the probe path actually claimed. */
+		release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
+#ifndef USE_IO_OPS
+		iounmap((char *)dev->base_addr);
+#endif
+		/* Unlink this device from the driver's module-global list. */
+		for (devp = &root_net_dev; *devp; devp = next) {
+			next = &((struct netdev_private *)(*devp)->priv)->next_module;
+			if (*devp == dev) {
+				*devp = *next;
+				break;
+			}
+		}
+		if (np->priv_addr)
+			kfree(np->priv_addr);
+		kfree(dev);
+		MOD_DEC_USE_COUNT;
+		break;
+	}
+	}
+
+	return 0;
+}
+
+
+#ifdef MODULE
+/* Module entry point: announce the driver version, then register with
+   the PCI driver layer and return its result. */
+int init_module(void)
+{
+	if (debug >= NETIF_MSG_DRV)	/* Emit version even if no cards detected. */
+		printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+	return pci_drv_register(&myson803_drv_id, NULL);
+}
+
+/* Module exit: detach from the PCI driver layer, then walk the driver's
+   device list unregistering, unmapping and freeing each interface. */
+void cleanup_module(void)
+{
+	struct net_device *next_dev;
+
+	pci_drv_unregister(&myson803_drv_id);
+
+	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+	while (root_net_dev) {
+		struct netdev_private *np = (void *)(root_net_dev->priv);
+		unregister_netdev(root_net_dev);
+#ifdef USE_IO_OPS
+		release_region(root_net_dev->base_addr,
+					   pci_id_tbl[np->chip_id].io_size);
+#else
+		iounmap((char *)(root_net_dev->base_addr));
+#endif
+		/* Save the link before freeing the device that holds it. */
+		next_dev = np->next_module;
+		if (np->priv_addr)
+			kfree(np->priv_addr);
+		kfree(root_net_dev);
+		root_net_dev = next_dev;
+	}
+}
+
+#endif  /* MODULE */
+
+/*
+ * Local variables:
+ *  compile-command: "make KERNVER=`uname -r` myson803.o"
+ *  compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c myson803.c"
+ *  simple-compile-command: "gcc -DMODULE -O6 -c myson803.c"
+ *  c-indent-level: 4
+ *  c-basic-offset: 4
+ *  tab-width: 4
+ * End:
+ */
diff -uNr net/drivers/net/netdrivers.spec linux-2.4.20/drivers/net/netdrivers.spec
--- net/drivers/net/netdrivers.spec	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.4.20/drivers/net/netdrivers.spec	2003-01-14 20:29:34.000000000 -0500
@@ -0,0 +1,105 @@
+%{!?ksource_dir: %define ksource_dir /lib/modules/%(uname -r)/build}
+%define uts_release %( gcc -E -D__BOOT_KERNEL_H_ -dM %{ksource_dir}/include/linux/version.h | grep UTS | sed 's/^#define UTS_RELEASE //;s/"//g' )
+
+Summary: Network device driver updates for Linux.
+Name: netdrivers
+Version: 3
+Release: 3
+Copyright: GPL
+Group: System Environment/Kernel
+Source0: ~becker/netdrivers/netdrivers-%{version}.tar.gz
+URL:     http://www.scyld.com/network/index.html
+
+Distribution: Scyld Beowulf
+Vendor: Scyld Computing Corp.
+Packager: Donald Becker <becker@scyld.com>
+Buildroot: %{_tmppath}/%{name}-%{version}-root 
+
+%description
+PCI network device driver updates for all Linux kernel versions.
+This package contains new, enhanced and updated network device drivers
+from Scyld Computing Corporation.
+
+%package modules
+%{expand: %%define kernelvers   %(if [ -z "$LINUX" ] ; then LINUX=%{ksource_dir}; fi; (cat $LINUX/include/linux/version.h ; echo UTS_RELEASE) | cpp -P -D__BOOT_KERNEL_H_ | tr -d \")}
+%{expand: %%define releaseextra .k%(echo %{kernelvers}|tr - _)}
+Release: @PACKAGE_RELEASE@%{releaseextra}
+Summary: Network device driver updates for Linux.
+Group: System Environment/Kernel
+
+%description modules
+PCI network device driver updates for all Linux kernel versions.
+This package contains new, enhanced and fixed network device drivers.
+
+%define all_ppc ppc ppciseries ppcpseries ppc64
+%define all_x86 i686 i386 i586 athlon x86_64
+
+%prep
+%setup -q
+
+%build
+# Build for beoboot
+make -s EXTRACFLAGS="-D__BOOT_KERNEL_H_ -D__BOOT_KERNEL_BEOBOOT=1 -D__MODULE_KERNEL_i386=1" KERNVER="%{uts_release}"
+mkdir -p beoboot/net
+make install MODULEDIR=beoboot PREFIX=nodepmod KERNVER="%{uts_release}"
+make -s clean
+
+# Build for SMP
+make -s EXTRACFLAGS="-D__BOOT_KERNEL_H_ -D__BOOT_KERNEL_SMP=1 -D__MODULE_KERNEL_%{_target_cpu}=1" KERNVER="%{uts_release}"
+mkdir -p smp/net
+make install MODULEDIR=smp PREFIX=nodepmod KERNVER="%{uts_release}"
+make -s clean
+
+# Build for UP
+make -s EXTRACFLAGS="-D__BOOT_KERNEL_H_ -D__BOOT_KERNEL_UP=1 -D__MODULE_KERNEL_%{_target_cpu}=1" KERNVER="%{uts_release}"
+mkdir -p up/net
+make install MODULEDIR=up PREFIX=nodepmod KERNVER="%{uts_release}"
+make -s clean
+
+%ifarch i386
+make -s EXTRACFLAGS="-D__BOOT_KERNEL_H_ -D__BOOT_KERNEL_BOOT=1 -D__MODULE_KERNEL_%{_target_cpu}=1" KERNVER="%{uts_release}"
+mkdir -p BOOT/net
+make install MODULEDIR=BOOT PREFIX=nodepmod KERNVER="%{uts_release}"
+make -s clean
+%endif
+
+
+%install
+if test "$RPM_BUILD_ROOT" != "/"; then
+	rm -rf "$RPM_BUILD_ROOT"
+fi
+
+for type in debug enterprise beoboot smp up BOOT; do
+    if [ -d $type ]; then
+	pushd $type/net
+	if [ $type = "up" ]; then
+	  KVER=%{uts_release}
+	else
+	  KVER=%{uts_release}${type}
+	fi
+	%{__mkdir} -p $RPM_BUILD_ROOT/lib/modules/${KVER}/net
+	%{__install} -m 0644 * $RPM_BUILD_ROOT/lib/modules/${KVER}/net/
+	popd
+    fi
+done
+
+%clean
+if test "$RPM_BUILD_ROOT" != "/"; then
+	rm -rf "$RPM_BUILD_ROOT"
+fi
+
+%post
+if [ -x /sbin/depmod ] && [ -w /boot/System.map-%{uts_release} ] ; then /sbin/depmod -a -F /boot/System.map-%{uts_release} %{uts_release} ; fi
+if [ -x /sbin/depmod ] && [ -w /boot/System.map-%{uts_release}smp ] ; then /sbin/depmod -a -F /boot/System.map-%{uts_release}smp %{uts_release}smp ; fi
+
+%files modules
+%defattr(-,root,root)
+/lib/modules/*/*/*
+
+%ifarch i386
+%files
+%endif
+
+# Local variables:
+#  compile-command: "rpm -ba netdrivers.spec"
+# End:
diff -uNr net/drivers/net/ns820.c linux-2.4.20/drivers/net/ns820.c
--- net/drivers/net/ns820.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.4.20/drivers/net/ns820.c	2003-01-14 20:29:34.000000000 -0500
@@ -0,0 +1,1538 @@
+/* ns820.c: A Linux Gigabit Ethernet driver for the NatSemi DP83820 series. */
+/*
+	Written/copyright 1999-2002 by Donald Becker.
+
+	This software may be used and distributed according to the terms of
+	the GNU General Public License (GPL), incorporated herein by reference.
+	Drivers based on or derived from this code fall under the GPL and must
+	retain the authorship, copyright and license notice.  This file is not
+	a complete program and may only be used when the entire operating
+	system is licensed under the GPL.  License for under other terms may be
+	available.  Contact the original author for details.
+
+	The original author may be reached as becker@scyld.com, or at
+	Scyld Computing Corporation
+	410 Severn Ave., Suite 210
+	Annapolis MD 21403
+
+	Support information and updates available at
+	http://www.scyld.com/network/natsemi.html
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"ns820.c:v1.02 11/17/2002  Written by Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+"  http://www.scyld.com/network/natsemi.html\n";
+/* Updated to recommendations in pci-skeleton v2.11. */
+
+/* Automatically extracted configuration info:
+probe-func: ns820_probe
+config-in: tristate 'National Semiconductor DP8382x series PCI Ethernet support' CONFIG_NATSEMI820
+
+c-help-name: National Semiconductor DP8382x series PCI Ethernet support
+c-help-symbol: CONFIG_NATSEMI820
+c-help: This driver is for the National Semiconductor DP83820 Gigabit Ethernet
+c-help: adapter series.
+c-help: More specific information and updates are available from
+c-help: http://www.scyld.com/network/natsemi.html
+*/
+
+/* The user-configurable values.
+   These may be modified when a driver module is loaded.*/
+
+/* Message enable level: 0..31 = no..all messages.  See NETIF_MSG docs. */
+static int debug = 2;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+   This chip uses a 2048 element hash table based on the Ethernet CRC.
+   Previous natsemi chips had unreliable multicast filter circuitry.
+   To work around an observed problem set this value to '0',
+   which will immediately switch to Rx-all-multicast.
+  */
+static int multicast_filter_limit = 100;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+   Setting to > 1518 effectively disables this feature.
+   This chip can only receive into aligned buffers, so architectures such
+   as the Alpha AXP might benefit from a copy-align.
+*/
+static int rx_copybreak = 0;
+
+/* Used to pass the media type, etc.
+   Both 'options[]' and 'full_duplex[]' should exist for driver
+   interoperability.
+   The media type is usually passed in 'options[]'.
+    The default is autonegotiation for speed and duplex.
+	This should rarely be overridden.
+    Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
+    Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
+    Use option values 0x20 and 0x200 for forcing full duplex operation.
+	Use 0x1000 or 0x2000 for gigabit.
+*/
+#define MAX_UNITS 8		/* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+   Understand the implications before changing these settings!
+   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+   Making the Tx ring too large decreases the effectiveness of channel
+   bonding and packet priority.
+   Too-large receive rings waste memory and confound network buffer limits. */
+#define TX_RING_SIZE	16
+#define TX_QUEUE_LEN	10		/* Limit ring entries actually used, min 4.  */
+#define RX_RING_SIZE	64
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT  (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+   Do not change this value without good reason.  This is not a limit,
+   but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ		1536
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning  You must compile this file with the correct options!
+#warning  See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/processor.h>		/* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+#if (LINUX_VERSION_CODE >= 0x20100)  &&  defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("National Semiconductor DP83820 series PCI Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(debug, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM_DESC(debug, "Driver message level (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(max_interrupt_work,
+				 "Driver maximum events handled per interrupt");
+MODULE_PARM_DESC(full_duplex,
+				 "Non-zero to force full duplex, non-negotiated link.");
+MODULE_PARM_DESC(rx_copybreak,
+				 "Breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+				 "Multicast addresses before switching to Rx-all-multicast");
+
+/*
+				Theory of Operation
+
+I. Board Compatibility
+
+This driver is designed for National Semiconductor DP83820 10/100/1000
+Ethernet NIC.  It is superficially similar to the 810 series "natsemi.c"
+driver, however the register layout, descriptor layout and element
+length of the new chip series is different.
+
+II. Board-specific settings
+
+This driver requires the PCI interrupt line to be configured.
+It honors the EEPROM-set values.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
+The NatSemi design uses a 'next descriptor' pointer that the driver forms
+into a list, thus rings can be arbitrarily sized.  Before changing the
+ring sizes you should understand the flow and cache effects of the
+full/available/empty hysteresis.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver uses a zero-copy receive and transmit scheme.
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack.  Buffers consumed this way are replaced by newly allocated
+skbuffs in a later phase of receives.
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames.  New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets.  When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine.  Copying also preloads the cache, which is
+most useful with small frames.
+
+A subtle aspect of the operation is that unaligned buffers are not permitted
+by the hardware.  Thus the IP header at offset 14 in an ethernet frame isn't
+longword aligned for further processing.  On copies frames are put into the
+skbuff at an offset of "+2", 16-byte aligning the IP header.
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control.  One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag.  The other thread is the interrupt handler, which is single
+threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished otherwise it sets
+the 'lp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IV. Notes
+
+The NatSemi 820 series PCI gigabit chips are very common on low-cost NICs.
+The '821 appears to be the same as '820 chip, only with pins for the upper
+32 bits marked "N/C".
+
+IVb. References
+
+http://www.scyld.com/expert/100mbps.html
+http://www.scyld.com/expert/NWay.html
+The NatSemi dp83820 datasheet is available: search www.natsemi.com
+
+IVc. Errata
+
+None characterised.
+
+*/
+
+
+
+static void *ns820_probe1(struct pci_dev *pdev, void *init_dev,
+							long ioaddr, int irq, int chip_idx, int find_cnt);
+static int power_event(void *dev_instance, int event);
+enum chip_capability_flags {FDXActiveLow=1, InvertGbXcvrPwr=2, };
+#ifdef USE_IO_OPS
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO  | PCI_ADDR0)
+#else
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
+#endif
+
+/* PCI match table: {name, {vendor/device id, mask, subsystem id, mask}, io flags,
+   io size, driver flags}.  More-specific entries (with subsystem IDs) must come
+   before the generic DP83820 entry, since the table is scanned in order. */
+static struct pci_id_info pci_id_tbl[] = {
+	{ "D-Link DGE-500T (DP83820)",
+	  { 0x0022100B, 0xffffffff, 0x49001186, 0xffffffff, },
+	  PCI_IOTYPE, 256, FDXActiveLow},
+	{"NatSemi DP83820", { 0x0022100B, 0xffffffff },
+	 PCI_IOTYPE, 256, 0},
+	{0,},						/* 0 terminated list. */
+};
+
+/* Hook for the pci-scan layer: driver name, PCI class filter, match table,
+   per-device probe function and power-management event callback. */
+struct drv_id_info natsemi_drv_id = {
+	"ns820", 0, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl, ns820_probe1,
+	power_event };
+
+/* Offsets to the device registers.
+   Unlike software-only systems, device drivers interact with complex hardware.
+   It's not useful to define symbolic names for every register bit in the
+   device.  Please do not change these names without good reason.
+*/
+enum register_offsets {
+	ChipCmd=0x00, ChipConfig=0x04, EECtrl=0x08, PCIBusCfg=0x0C,
+	IntrStatus=0x10, IntrMask=0x14, IntrEnable=0x18, IntrHoldoff=0x1C,
+	TxRingPtr=0x20, TxRingPtrHi=0x24, TxConfig=0x28,
+	RxRingPtr=0x30, RxRingPtrHi=0x34, RxConfig=0x38,
+	WOLCmd=0x40, PauseCmd=0x44, RxFilterAddr=0x48, RxFilterData=0x4C,
+	BootRomAddr=0x50, BootRomData=0x54, ChipRevReg=0x58,
+	StatsCtrl=0x5C, RxPktErrs=0x60, RxMissed=0x68, RxCRCErrs=0x64,
+};
+
+/* Bits in ChipCmd. */
+enum ChipCmdBits {
+	ChipReset=0x100, SoftIntr=0x80, RxReset=0x20, TxReset=0x10,
+	RxOff=0x08, RxOn=0x04, TxOff=0x02, TxOn=0x01,
+};
+
+/* Bits in ChipConfig. */
+enum ChipConfigBits {
+	CfgLinkGood=0x80000000, CfgFDX=0x10000000,
+	CfgXcrReset=0x0400, CfgXcrOff=0x0200,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+	IntrRxDone=0x0001, IntrRxIntr=0x0002, IntrRxErr=0x0004, IntrRxEarly=0x0008,
+	IntrRxIdle=0x0010, IntrRxOverrun=0x0020,
+	IntrTxDone=0x0040, IntrTxIntr=0x0080, IntrTxErr=0x0100,
+	IntrTxIdle=0x0200, IntrTxUnderrun=0x0400,
+	StatsMax=0x0800, IntrDrv=0x1000, WOLPkt=0x2000, LinkChange=0x4000,
+	RxStatusOverrun=0x10000,
+	RxResetDone=0x00200000, TxResetDone=0x00400000,
+	IntrPCIErr=0x001E0000,
+	IntrNormalSummary=0x0251, IntrAbnormalSummary=0xED20,
+};
+
+/* Bits in the RxMode register. */
+enum rx_mode_bits {
+	AcceptErr=0x20, AcceptRunt=0x10,
+	AcceptBroadcast=0xC0000000,
+	AcceptMulticast=0x00200000, AcceptAllMulticast=0x20000000,
+	AcceptAllPhys=0x10000000, AcceptMyPhys=0x08000000,
+};
+
+/* The Rx and Tx buffer descriptors. */
+/* Note that using only 32 bit fields simplifies conversion to big-endian
+   architectures. */
+/* Rx/Tx DMA descriptor as consumed by the chip.
+   Fix: the 64-bit fields were followed by '#endif' and then unconditional
+   u32 fields, so building with ADDRLEN == 64 declared duplicate members
+   'next_desc'/'buf_addr' (a compile error).  The u32 variants belong in the
+   '#else' branch, matching the ADDR_64BITS ring-pointer-high writes in
+   netdev_open(). */
+struct netdev_desc {
+#if ADDRLEN == 64
+	u64 next_desc;
+	u64 buf_addr;
+#else
+	u32 next_desc;
+	u32 buf_addr;
+#endif
+	s32 cmd_status;
+	u32 vlan_status;
+};
+
+/* Bits in network_desc.status */
+enum desc_status_bits {
+	DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
+	DescNoCRC=0x10000000,
+	DescPktOK=0x08000000, RxTooLong=0x00400000,
+};
+
+#define PRIV_ALIGN	15	/* Required alignment mask */
+/* Per-device driver state, hung off dev->priv (16-byte aligned via PRIV_ALIGN
+   so the descriptor rings at the front meet the chip's alignment needs). */
+struct netdev_private {
+	/* Descriptor rings first for alignment. */
+	struct netdev_desc rx_ring[RX_RING_SIZE];
+	struct netdev_desc tx_ring[TX_RING_SIZE];
+	struct net_device *next_module;		/* Link for devices of this type. */
+	void *priv_addr;					/* Unaligned address for kfree */
+	const char *product_name;			/* Chip/board name from pci_id_tbl. */
+	/* The addresses of receive-in-place skbuffs. */
+	struct sk_buff* rx_skbuff[RX_RING_SIZE];
+	/* The saved address of a sent-in-place packet/buffer, for later free(). */
+	struct sk_buff* tx_skbuff[TX_RING_SIZE];
+	struct net_device_stats stats;		/* Counters returned by get_stats(). */
+	struct timer_list timer;	/* Media monitoring timer. */
+	/* Frequently used values: keep some adjacent for cache effect. */
+	int msg_level;						/* NETIF_MSG_* bitmask from 'debug'. */
+	int chip_id, drv_flags;				/* Index and flags from pci_id_tbl. */
+	struct pci_dev *pci_dev;
+	long in_interrupt;			/* Word-long for SMP locks. */
+	int max_interrupt_work;				/* Event budget per interrupt. */
+	int intr_enable;					/* Value written to IntrMask. */
+	unsigned int restore_intr_enable:1;	/* Set if temporarily masked.  */
+	unsigned int rx_q_empty:1;			/* Set out-of-skbuffs.  */
+
+	struct netdev_desc *rx_head_desc;
+	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
+	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
+	int rx_copybreak;					/* Copy frames shorter than this. */
+
+	unsigned int cur_tx, dirty_tx;
+	unsigned int tx_full:1;				/* The Tx queue is full. */
+	/* These values keep track of the transceiver/media in use. */
+	unsigned int full_duplex:1;			/* Full-duplex operation requested. */
+	unsigned int duplex_lock:1;			/* Duplex forced; skip check_duplex(). */
+	unsigned int medialock:1;			/* Do not sense media. */
+	unsigned int default_port:4;		/* Last dev->if_port value. */
+	/* Rx filter. */
+	u32 cur_rx_mode;					/* Last value written to RxFilterAddr. */
+	u32 rx_filter[16];
+	int multicast_filter_limit;			/* Addresses before rx-all-multicast. */
+	/* FIFO and PCI burst thresholds. */
+	int tx_config, rx_config;			/* Shadow of TxConfig/RxConfig regs. */
+	/* MII transceiver section. */
+	u16 advertising;					/* NWay media advertisement */
+};
+
+/* Forward declarations for the driver entry points below.
+   Fix: netdev_error() was declared twice; the redundant duplicate is removed. */
+static int  eeprom_read(long ioaddr, int location);
+static void mdio_sync(long mdio_addr);
+static int  mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int  netdev_open(struct net_device *dev);
+static void check_duplex(struct net_device *dev);
+static void netdev_timer(unsigned long data);
+static void tx_timeout(struct net_device *dev);
+static int  rx_ring_fill(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static int  start_tx(struct sk_buff *skb, struct net_device *dev);
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static void netdev_error(struct net_device *dev, int intr_status);
+static int  netdev_rx(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int  netdev_close(struct net_device *dev);
+
+
+
+/* A list of our installed devices, for removing the driver module. */
+static struct net_device *root_net_dev = NULL;
+
+#ifndef MODULE
+/* Non-module entry point: hand the driver-id table to the PCI scan layer,
+   announcing the driver version only when at least one device registers. */
+int ns820_probe(struct net_device *dev)
+{
+	int rc = pci_drv_register(&natsemi_drv_id, dev);
+
+	if (rc < 0)
+		return -ENODEV;
+	printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+	return 0;
+}
+#endif
+
+/* Per-device probe, called by the pci-scan layer for each matched chip.
+   Registers an ethernet device, reads the station address from the EEPROM,
+   resets the chip, allocates the aligned private area, and records the
+   user-supplied media options.  Returns the net_device, or NULL on failure. */
+static void *ns820_probe1(struct pci_dev *pdev, void *init_dev,
+						  long ioaddr, int irq, int chip_idx, int card_idx)
+{
+	struct net_device *dev;
+	struct netdev_private *np;
+	void *priv_mem;
+	int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+
+	dev = init_etherdev(init_dev, 0);
+	if (!dev)
+		return NULL;
+
+	/* Perhaps NETIF_MSG_PROBE */
+	printk(KERN_INFO "%s: %s at 0x%lx, ",
+		   dev->name, pci_id_tbl[chip_idx].name, ioaddr);
+
+	/* Station address: EEPROM words 12, 11, 10 byte-swapped into dev_addr. */
+	for (i = 0; i < 3; i++)
+		((u16 *)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, 12 - i));
+	for (i = 0; i < 5; i++)
+		printk("%2.2x:", dev->dev_addr[i]);
+	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+	/* Reset the chip to erase previous misconfiguration. */
+	writel(ChipReset, ioaddr + ChipCmd);
+	/* Power up Xcvr. */
+	writel(~CfgXcrOff & readl(ioaddr + ChipConfig), ioaddr + ChipConfig);
+
+	/* Make certain elements e.g. descriptor lists are aligned. */
+	priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
+	/* Check for the very unlikely case of no memory. */
+	/* NOTE(review): on this failure path the device registered by
+	   init_etherdev() above is never unregistered -- verify intended. */
+	if (priv_mem == NULL)
+		return NULL;
+
+	dev->base_addr = ioaddr;
+	dev->irq = irq;
+
+	/* Round up to the next 16-byte boundary; keep the raw pointer for kfree. */
+	dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+	memset(np, 0, sizeof(*np));
+	np->priv_addr = priv_mem;
+
+	/* Push onto the module-wide device list used at cleanup time. */
+	np->next_module = root_net_dev;
+	root_net_dev = dev;
+
+	np->pci_dev = pdev;
+	np->chip_id = chip_idx;
+	np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
+	np->msg_level = (1 << debug) - 1;
+	np->rx_copybreak = rx_copybreak;
+	np->max_interrupt_work = max_interrupt_work;
+	np->multicast_filter_limit = multicast_filter_limit;
+
+	/* A kernel boot-time "ether=" setting overrides the module option. */
+	if (dev->mem_start)
+		option = dev->mem_start;
+
+	/* The lower four bits are the media type. */
+	if (option > 0) {
+		if (option & 0x220)
+			np->full_duplex = 1;
+		np->default_port = option & 0x33ff;
+		if (np->default_port & 0x330)
+			np->medialock = 1;
+	}
+	if (card_idx < MAX_UNITS  &&  full_duplex[card_idx] > 0)
+		np->full_duplex = 1;
+
+	if (np->full_duplex) {
+		if (np->msg_level & NETIF_MSG_PROBE)
+			printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
+				   " disabled.\n", dev->name);
+		np->duplex_lock = 1;
+	}
+
+	/* The chip-specific entries in the device structure. */
+	dev->open = &netdev_open;
+	dev->hard_start_xmit = &start_tx;
+	dev->stop = &netdev_close;
+	dev->get_stats = &get_stats;
+	dev->set_multicast_list = &set_rx_mode;
+	dev->do_ioctl = &mii_ioctl;
+
+	/* Allow forcing the media type. */
+	/* NOTE(review): this repeats the option parsing above with a different
+	   mask (0x3ff vs 0x33ff); the second assignment to default_port wins.
+	   Confirm the duplication is intentional. */
+	if (option > 0) {
+		if (option & 0x220)
+			np->full_duplex = 1;
+		np->default_port = option & 0x3ff;
+		if (np->default_port & 0x330) {
+			np->medialock = 1;
+			if (np->msg_level & NETIF_MSG_PROBE)
+				printk(KERN_INFO "  Forcing %dMbs %s-duplex operation.\n",
+					   (option & 0x300 ? 100 : 10),
+					   (np->full_duplex ? "full" : "half"));
+			/* Write BMCR on PHY 1: speed and duplex bits, autoneg off. */
+			mdio_write(dev, 1, 0,
+					   ((option & 0x300) ? 0x2000 : 0) | 	/* 100mbps? */
+					   (np->full_duplex ? 0x0100 : 0)); /* Full duplex? */
+		}
+	}
+
+	return dev;
+}
+
+
+/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
+   The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses.
+   Update to the code in other drivers for 8/10 bit addresses.
+*/
+
+/* Delay between EEPROM clock transitions.
+   This "delay" forces out buffered PCI writes, which is sufficient to meet
+   the timing requirements of most EEPROMs.
+*/
+#define eeprom_delay(ee_addr)	readl(ee_addr)
+
+enum EEPROM_Ctrl_Bits {
+	EE_ShiftClk=0x04, EE_DataIn=0x01, EE_ChipSelect=0x08, EE_DataOut=0x02,
+};
+#define EE_Write0 (EE_ChipSelect)
+#define EE_Write1 (EE_ChipSelect | EE_DataIn)
+
+/* The EEPROM commands include the 01 preamble. */
+enum EEPROM_Cmds {
+	EE_WriteCmd=5, EE_ReadCmd=6, EE_EraseCmd=7,
+};
+
+/* Read one 16-bit word from the serial EEPROM at 'location' (6-bit address)
+   by bit-banging the three-wire protocol through the EECtrl register.
+   Each write is followed by a flushing read (eeprom_delay) to meet the
+   EEPROM timing; do not reorder or coalesce the register accesses. */
+static int eeprom_read(long addr, int location)
+{
+	long eeprom_addr = addr + EECtrl;
+	/* 11-bit command: 01 preamble + read opcode + 6-bit address. */
+	int read_cmd = (EE_ReadCmd << 6) | location;
+	int retval = 0;
+	int i;
+
+	writel(EE_Write0, eeprom_addr);
+
+	/* Shift the read command bits out. */
+	for (i = 10; i >= 0; i--) {
+		int dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
+		writel(dataval, eeprom_addr);
+		eeprom_delay(eeprom_addr);
+		writel(dataval | EE_ShiftClk, eeprom_addr);
+		eeprom_delay(eeprom_addr);
+	}
+	writel(EE_ChipSelect, eeprom_addr);
+	eeprom_delay(eeprom_addr);
+
+	/* Clock in the 16 data bits, MSB first. */
+	for (i = 15; i >= 0; i--) {
+		writel(EE_ChipSelect | EE_ShiftClk, eeprom_addr);
+		eeprom_delay(eeprom_addr);
+		retval |= (readl(eeprom_addr) & EE_DataOut) ? 1 << i : 0;
+		writel(EE_ChipSelect, eeprom_addr);
+		eeprom_delay(eeprom_addr);
+	}
+
+	/* Terminate the EEPROM access. */
+	writel(EE_Write0, eeprom_addr);
+	writel(0, eeprom_addr);
+	return retval;
+}
+
+/*  MII transceiver control section.
+	Read and write MII registers using software-generated serial MDIO
+	protocol.  See the MII specifications or DP83840A data sheet for details.
+
+	The maximum data clock rate is 2.5 Mhz.  To meet minimum timing we
+	must flush writes to the PCI bus with a PCI read. */
+#define mdio_delay(mdio_addr) readl(mdio_addr)
+
+/* Set iff a MII transceiver on any interface requires mdio preamble.
+   This is only set with older transceivers, so the extra
+   code size of a per-interface flag is not worthwhile. */
+static char mii_preamble_required = 0;
+
+enum mii_reg_bits {
+	MDIO_ShiftClk=0x0040, MDIO_Data=0x0010, MDIO_EnbOutput=0x0020,
+};
+#define MDIO_EnbIn  (0)
+#define MDIO_WRITE0 (MDIO_EnbOutput)
+#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
+
+/* Generate the preamble required for initial synchronization and
+   a few older transceivers. */
+/* Clock out 32 '1' bits on the MDIO lines to (re)establish frame
+   synchronization with transceivers that require a full preamble. */
+static void mdio_sync(long mdio_addr)
+{
+	int bits = 32;
+
+	/* Establish sync by sending at least 32 logic ones. */
+	while (--bits >= 0) {
+		writel(MDIO_WRITE1, mdio_addr);
+		mdio_delay(mdio_addr);		/* Flushing read enforces MDIO timing. */
+		writel(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
+		mdio_delay(mdio_addr);
+	}
+}
+
+/* Read a 16-bit MII management register from 'phy_id'/'location' by
+   bit-banging an MDIO read frame through the EECtrl register.
+   Register access order and the flushing mdio_delay() reads are the
+   timing contract; do not reorder. */
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+	long mdio_addr = dev->base_addr + EECtrl;
+	/* 16-bit header: preamble bits + start + read opcode + PHY and reg addr. */
+	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+	int i, retval = 0;
+
+	if (mii_preamble_required)
+		mdio_sync(mdio_addr);
+
+	/* Shift the read command bits out. */
+	for (i = 15; i >= 0; i--) {
+		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+		writel(dataval, mdio_addr);
+		mdio_delay(mdio_addr);
+		writel(dataval | MDIO_ShiftClk, mdio_addr);
+		mdio_delay(mdio_addr);
+	}
+	/* Read the two transition, 16 data, and wire-idle bits. */
+	for (i = 19; i > 0; i--) {
+		writel(MDIO_EnbIn, mdio_addr);
+		mdio_delay(mdio_addr);
+		retval = (retval << 1) | ((readl(mdio_addr) & MDIO_Data) ? 1 : 0);
+		writel(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+		mdio_delay(mdio_addr);
+	}
+	/* Drop the trailing idle bit and mask to the 16 data bits. */
+	return (retval>>1) & 0xffff;
+}
+
+/* Write 'value' to MII management register 'location' on 'phy_id' by
+   bit-banging a 32-bit MDIO write frame (start + write opcode + addresses
+   + turnaround + data) through the EECtrl register. */
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
+{
+	long mdio_addr = dev->base_addr + EECtrl;
+	/* 0x5002<<16 supplies start/write-opcode/turnaround bits; the PHY and
+	   register addresses and the data are OR-ed into their fields. */
+	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
+	int i;
+
+	if (mii_preamble_required)
+		mdio_sync(mdio_addr);
+
+	/* Shift the command bits out. */
+	for (i = 31; i >= 0; i--) {
+		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+		writel(dataval, mdio_addr);
+		mdio_delay(mdio_addr);
+		writel(dataval | MDIO_ShiftClk, mdio_addr);
+		mdio_delay(mdio_addr);
+	}
+	/* Clear out extra bits. */
+	for (i = 2; i > 0; i--) {
+		writel(MDIO_EnbIn, mdio_addr);
+		mdio_delay(mdio_addr);
+		writel(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+		mdio_delay(mdio_addr);
+	}
+	return;
+}
+
+/* dev->open handler: acquire the IRQ, power the transceiver, set up the
+   descriptor rings and perfect-match filter, program Tx/Rx configuration,
+   enable interrupts, start the Rx/Tx engines and arm the monitor timer.
+   Returns 0 on success or -EAGAIN if the IRQ cannot be acquired. */
+static int netdev_open(struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int i;
+	u32 intr_status = readl(ioaddr + IntrStatus);
+
+	/* We have not yet encountered a case where we need to reset the chip. */
+
+	MOD_INC_USE_COUNT;
+
+	if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
+		MOD_DEC_USE_COUNT;
+		return -EAGAIN;
+	}
+
+	/* Power up Xcvr. */
+	/* NOTE(review): bit 0x00400000 is set here without a symbolic name --
+	   presumably a transceiver enable; confirm against the datasheet. */
+	writel((~CfgXcrOff & readl(ioaddr + ChipConfig)) | 0x00400000,
+		   ioaddr + ChipConfig);
+	if (np->msg_level & NETIF_MSG_IFUP)
+		printk(KERN_DEBUG "%s: netdev_open() irq %d intr_status %8.8x.\n",
+			   dev->name, dev->irq, intr_status);
+
+	init_ring(dev);
+
+	/* Give the chip the ring base addresses (upper 32 bits only when
+	   64-bit addressing is compiled in; otherwise zero). */
+#if defined(ADDR_64BITS) && defined(__alpha__)
+	writel(virt_to_bus(np->rx_ring) >> 32, ioaddr + RxRingPtrHi);
+	writel(virt_to_bus(np->tx_ring) >> 32, ioaddr + TxRingPtrHi);
+#else
+	writel(0, ioaddr + RxRingPtrHi);
+	writel(0, ioaddr + TxRingPtrHi);
+#endif
+	writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
+	writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
+
+	/* Load the station address into the perfect filter, 16 bits at a time
+	   through the indexed RxFilterAddr/RxFilterData pair. */
+	for (i = 0; i < 6; i += 2) {
+		writel(i, ioaddr + RxFilterAddr);
+		writew(dev->dev_addr[i] + (dev->dev_addr[i+1] << 8),
+			   ioaddr + RxFilterData);
+	}
+
+	/* Initialize other registers. */
+	/* Configure the PCI bus bursts and FIFO thresholds. */
+	/* Configure for standard, in-spec Ethernet. */
+
+	/* Select duplex from the forced setting or the chip's link status;
+	   FDXActiveLow boards report the duplex pin with inverted polarity. */
+	if (np->full_duplex  ||
+		((readl(ioaddr + ChipConfig) & CfgFDX) == 0) ^
+		((np->drv_flags & FDXActiveLow) != 0)) {
+		np->tx_config = 0xD0801002;
+		np->rx_config = 0x10000020;
+	} else {
+		np->tx_config = 0x10801002;
+		np->rx_config = 0x0020;
+	}
+	/* Accept oversized frames when a jumbo MTU is configured. */
+	if (dev->mtu > 1500)
+		np->rx_config |= 0x08000000;
+	writel(np->tx_config, ioaddr + TxConfig);
+	writel(np->rx_config, ioaddr + RxConfig);
+	if (np->msg_level & NETIF_MSG_IFUP)
+		printk(KERN_DEBUG "%s: Setting TxConfig to %8.8x.\n",
+			   dev->name, (int)readl(ioaddr + TxConfig));
+
+	if (dev->if_port == 0)
+		dev->if_port = np->default_port;
+
+	np->in_interrupt = 0;
+
+	check_duplex(dev);
+	set_rx_mode(dev);
+	netif_start_tx_queue(dev);
+
+	/* Enable interrupts by setting the interrupt mask. */
+	np->intr_enable = IntrNormalSummary | IntrAbnormalSummary | 0x1f;
+	writel(np->intr_enable, ioaddr + IntrMask);
+	writel(1, ioaddr + IntrEnable);
+
+	writel(RxOn | TxOn, ioaddr + ChipCmd);
+	writel(4, ioaddr + StatsCtrl);					/* Clear Stats */
+
+	if (np->msg_level & NETIF_MSG_IFUP)
+		printk(KERN_DEBUG "%s: Done netdev_open(), status: %x.\n",
+			   dev->name, (int)readl(ioaddr + ChipCmd));
+
+	/* Set the timer to check for link beat. */
+	init_timer(&np->timer);
+	np->timer.expires = jiffies + 3*HZ;
+	np->timer.data = (unsigned long)dev;
+	np->timer.function = &netdev_timer;				/* timer handler */
+	add_timer(&np->timer);
+
+	return 0;
+}
+
+/* Track the negotiated duplex: read the link's duplex bit from ChipConfig
+   and, when it changed, update the shadowed Tx/Rx configuration and write
+   it back to the chip.  No-op when the user forced duplex (duplex_lock). */
+static void check_duplex(struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int duplex;
+
+	if (np->duplex_lock)
+		return;
+	duplex = readl(ioaddr + ChipConfig) & CfgFDX ? 1 : 0;
+	if (np->full_duplex != duplex) {
+		np->full_duplex = duplex;
+		if (np->msg_level & NETIF_MSG_LINK)
+			printk(KERN_INFO "%s: Setting %s-duplex based on negotiated link"
+				   " capability.\n", dev->name,
+				   duplex ? "full" : "half");
+		/* Full duplex: enable Rx accept-own and Tx carrier/collision ignore
+		   bits in the shadow copies, then push both registers. */
+		if (duplex) {
+			np->rx_config |= 0x10000000;
+			np->tx_config |= 0xC0000000;
+		} else {
+			np->rx_config &= ~0x10000000;
+			np->tx_config &= ~0xC0000000;
+		}
+		writel(np->tx_config, ioaddr + TxConfig);
+		writel(np->rx_config, ioaddr + RxConfig);
+		if (np->msg_level & NETIF_MSG_LINK)
+			printk(KERN_DEBUG "%s: Setting TxConfig to %8.8x (%8.8x).\n",
+				   dev->name, np->tx_config, (int)readl(ioaddr + TxConfig));
+	}
+}
+
+/* Periodic (10s) monitor: kick a software interrupt if the Rx ring ran out
+   of skbuffs, detect a hung transmitter, re-check link duplex, and re-arm. */
+static void netdev_timer(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int next_tick = 10*HZ;
+
+	if (np->msg_level & NETIF_MSG_TIMER)
+		printk(KERN_DEBUG "%s: Driver monitor timer tick, status %8.8x.\n",
+			   dev->name, (int)readl(ioaddr + ChipConfig));
+	if (np->rx_q_empty) {
+		/* Trigger an interrupt to refill. */
+		writel(SoftIntr, ioaddr + ChipCmd);
+	}
+	/* Declare a Tx timeout only when the queue is paused with work pending
+	   and nothing has been transmitted for TX_TIMEOUT jiffies. */
+	if (netif_queue_paused(dev)  &&
+		np->cur_tx - np->dirty_tx > 1  &&
+		(jiffies - dev->trans_start) > TX_TIMEOUT) {
+		tx_timeout(dev);
+	}
+	check_duplex(dev);
+	np->timer.expires = jiffies + next_tick;
+	add_timer(&np->timer);
+}
+
+/* Handle a transmit watchdog expiry: log the state, optionally dump both
+   descriptor rings, and count the error.
+   NOTE(review): despite the "resetting..." message, no chip reset or Tx
+   restart is actually performed here (the stub comments below mark where
+   it would go) -- confirm whether recovery is handled elsewhere. */
+static void tx_timeout(struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+
+	printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
+		   " resetting...\n", dev->name, (int)readl(ioaddr + TxRingPtr));
+
+	if (np->msg_level & NETIF_MSG_TX_ERR) {
+		int i;
+		printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
+		for (i = 0; i < RX_RING_SIZE; i++)
+			printk(" %8.8x", (unsigned int)np->rx_ring[i].cmd_status);
+		printk("\n"KERN_DEBUG"  Tx ring %p: ", np->tx_ring);
+		for (i = 0; i < TX_RING_SIZE; i++)
+			printk(" %4.4x", np->tx_ring[i].cmd_status);
+		printk("\n");
+	}
+
+	/* Perhaps we should reinitialize the hardware here. */
+	dev->if_port = 0;
+	/* Stop and restart the chip's Tx processes . */
+
+	/* Trigger an immediate transmit demand. */
+
+	dev->trans_start = jiffies;
+	np->stats.tx_errors++;
+	return;
+}
+
+/* Refill the Rx ring buffers, returning non-zero if not full. */
+/* Refill the Rx ring buffers, returning non-zero if not full. */
+/* Walks dirty_rx up to cur_rx, allocating a fresh skbuff for each empty
+   slot and handing the descriptor back to the chip by rewriting its
+   cmd_status with the buffer size (clearing DescOwn).  Statement order
+   matters: buf_addr must be valid before cmd_status is rewritten. */
+static int rx_ring_fill(struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	unsigned int entry;
+
+	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+		entry = np->dirty_rx % RX_RING_SIZE;
+		if (np->rx_skbuff[entry] == NULL) {
+			struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+			np->rx_skbuff[entry] = skb;
+			if (skb == NULL)
+				return 1;				/* Better luck next time. */
+			skb->dev = dev;			/* Mark as being used by this device. */
+			np->rx_ring[entry].buf_addr = virt_to_bus(skb->tail);
+		}
+		np->rx_ring[entry].cmd_status = cpu_to_le32(DescIntr | np->rx_buf_sz);
+	}
+	return 0;
+}
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+/* Links each descriptor to its successor (wrapping the last back to the
+   first), clears the skbuff tables, then populates the Rx buffers.
+   Called from netdev_open() before the ring addresses are given to the
+   chip, so no locking is needed here. */
+static void init_ring(struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	int i;
+
+	np->tx_full = 0;
+	np->cur_rx = np->cur_tx = 0;
+	np->dirty_rx = np->dirty_tx = 0;
+
+	/* MAX(PKT_BUF_SZ, dev->mtu + 8); */
+	/* I know you _want_ to change this without understanding it.  Don't. */
+	np->rx_buf_sz = (dev->mtu <= 1532 ? PKT_BUF_SZ : dev->mtu + 8);
+	np->rx_head_desc = &np->rx_ring[0];
+
+	/* Initialize all Rx descriptors. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		np->rx_ring[i].next_desc = virt_to_bus(&np->rx_ring[i+1]);
+		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
+		np->rx_skbuff[i] = 0;
+	}
+	/* Mark the last entry as wrapping the ring. */
+	np->rx_ring[i-1].next_desc = virt_to_bus(&np->rx_ring[0]);
+
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		np->tx_skbuff[i] = 0;
+		np->tx_ring[i].next_desc = virt_to_bus(&np->tx_ring[i+1]);
+		np->tx_ring[i].cmd_status = 0;
+	}
+	np->tx_ring[i-1].next_desc = virt_to_bus(&np->tx_ring[0]);
+
+	/* Fill in the Rx buffers.
+	   Allocation failure just leaves a "negative" np->dirty_rx. */
+	np->dirty_rx = (unsigned int)(0 - RX_RING_SIZE);
+	rx_ring_fill(dev);
+
+	return;
+}
+
+/* dev->hard_start_xmit handler: queue one skb on the Tx ring and kick the
+   transmitter.  Returns 0 on success, 1 when the queue is paused (caller
+   requeues the packet).  The descriptor ownership bit must be written last
+   and cur_tx incremented only afterwards -- do not reorder. */
+static int start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	unsigned int entry;
+
+	/* Block a timer-based transmit from overlapping.  This happens when
+	   packets are presumed lost, and we use this check the Tx status. */
+	if (netif_pause_tx_queue(dev) != 0) {
+		/* This watchdog code is redundant with the media monitor timer. */
+		if (jiffies - dev->trans_start > TX_TIMEOUT)
+			tx_timeout(dev);
+		return 1;
+	}
+
+	/* Note: Ordering is important here, set the field with the
+	   "ownership" bit last, and only then increment cur_tx.
+	   No spinlock is needed for either Tx or Rx.
+	*/
+
+	/* Calculate the next Tx descriptor entry. */
+	entry = np->cur_tx % TX_RING_SIZE;
+
+	np->tx_skbuff[entry] = skb;
+
+	np->tx_ring[entry].buf_addr = virt_to_bus(skb->data);
+	np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn|DescIntr | skb->len);
+	np->cur_tx++;
+
+	/* StrongARM: Explicitly cache flush np->tx_ring and skb->data,skb->len. */
+
+	/* Leave at least four slots of headroom; re-check dirty_tx once in case
+	   the interrupt handler just cleaned the queue under us. */
+	if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
+		np->tx_full = 1;
+		/* Check for a just-cleared queue. */
+		if (np->cur_tx - (volatile unsigned int)np->dirty_tx
+			< TX_QUEUE_LEN - 4) {
+			np->tx_full = 0;
+			netif_unpause_tx_queue(dev);
+		} else
+			netif_stop_tx_queue(dev);
+	} else
+		netif_unpause_tx_queue(dev);		/* Typical path */
+	/* Wake the potentially-idle transmit channel. */
+	writel(TxOn, dev->base_addr + ChipCmd);
+
+	dev->trans_start = jiffies;
+
+	if (np->msg_level & NETIF_MSG_TX_QUEUED) {
+		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
+			   dev->name, np->cur_tx, entry);
+	}
+	return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+   after the Tx thread.  Work per invocation is bounded by
+   np->max_interrupt_work. */
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+	struct net_device *dev = (struct net_device *)dev_instance;
+	struct netdev_private *np;
+	long ioaddr;
+	int boguscnt;				/* Limits the loop iterations below. */
+
+#ifndef final_version			/* Can never occur. */
+	if (dev == NULL) {
+		printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown "
+				"device.\n", irq);
+		return;
+	}
+#endif
+
+	ioaddr = dev->base_addr;
+	np = (struct netdev_private *)dev->priv;
+	boguscnt = np->max_interrupt_work;
+
+#if defined(__i386__)  &&  LINUX_VERSION_CODE < 0x020300
+	/* A lock to prevent simultaneous entry bug on Intel SMP machines. */
+	if (test_and_set_bit(0, (void*)&dev->interrupt)) {
+		printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
+			   dev->name);
+		dev->interrupt = 0;	/* Avoid halting machine. */
+		return;
+	}
+#endif
+
+	do {
+		u32 intr_status = readl(ioaddr + IntrStatus);
+
+		if (np->msg_level & NETIF_MSG_INTR)
+			printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
+				   dev->name, intr_status);
+
+		/* 0 means no more work; all-ones presumably means the card has
+		   been removed (e.g. CardBus ejection). */
+		if (intr_status == 0 || intr_status == 0xffffffff)
+			break;
+
+		/* Acknowledge all of the current interrupt sources ASAP.
+		   Nominally the read above accomplishes this, but... */
+		writel(intr_status & 0x001ffff, ioaddr + IntrStatus);
+
+		/* Rx work: deliver received frames, then refill the ring. */
+		if (intr_status & (IntrRxDone | IntrRxIntr)) {
+			netdev_rx(dev);
+			np->rx_q_empty = rx_ring_fill(dev);
+		}
+
+		if (intr_status & (IntrRxIdle | IntrDrv)) {
+			unsigned int old_dirty_rx = np->dirty_rx;
+			if (rx_ring_fill(dev) == 0)
+				np->rx_q_empty = 0;
+			/* Restart Rx engine iff we did add a buffer. */
+			if (np->dirty_rx != old_dirty_rx)
+				writel(RxOn, dev->base_addr + ChipCmd);
+		}
+
+		/* Tx work: reclaim completed descriptors, update statistics and
+		   free the associated skbuffs. */
+		for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+			int entry = np->dirty_tx % TX_RING_SIZE;
+			if (np->msg_level & NETIF_MSG_INTR)
+				printk(KERN_DEBUG "%s: Tx entry %d @%p status %8.8x.\n",
+					   dev->name, entry, &np->tx_ring[entry],
+					   np->tx_ring[entry].cmd_status);
+			if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
+				break;			/* Chip still owns it: stop reclaiming. */
+			/* Bit 0x08000000 distinguishes success from the error cases
+			   below -- presumably the "packet OK" status bit; confirm
+			   against the chip documentation. */
+			if (np->tx_ring[entry].cmd_status & cpu_to_le32(0x08000000)) {
+				if (np->msg_level & NETIF_MSG_TX_DONE)
+					printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
+						   dev->name, np->tx_ring[entry].cmd_status);
+				np->stats.tx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+				np->stats.tx_bytes += np->tx_skbuff[entry]->len;
+#endif
+			} else {			/* Various Tx errors */
+				int tx_status = le32_to_cpu(np->tx_ring[entry].cmd_status);
+				if (tx_status & 0x04010000) np->stats.tx_aborted_errors++;
+				if (tx_status & 0x02000000) np->stats.tx_fifo_errors++;
+				if (tx_status & 0x01000000) np->stats.tx_carrier_errors++;
+				if (tx_status & 0x00200000) np->stats.tx_window_errors++;
+				if (np->msg_level & NETIF_MSG_TX_ERR)
+					printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+						   dev->name, tx_status);
+				np->stats.tx_errors++;
+			}
+			/* Free the original skb. */
+			dev_free_skb_irq(np->tx_skbuff[entry]);
+			np->tx_skbuff[entry] = 0;
+		}
+		/* Note the 4 slot hysteresis to mark the queue non-full. */
+		if (np->tx_full
+			&& np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+			/* The ring is no longer full, allow new TX entries. */
+			np->tx_full = 0;
+			netif_resume_tx_queue(dev);
+		}
+
+		/* Abnormal error summary/uncommon events handlers. */
+		if (intr_status & IntrAbnormalSummary)
+			netdev_error(dev, intr_status);
+
+		if (--boguscnt < 0) {
+			printk(KERN_WARNING "%s: Too much work at interrupt, "
+				   "status=0x%4.4x.\n",
+				   dev->name, intr_status);
+			np->restore_intr_enable = 1;
+			break;
+		}
+	} while (1);
+
+	if (np->msg_level & NETIF_MSG_INTR)
+		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+			   dev->name, (int)readl(ioaddr + IntrStatus));
+
+#if defined(__i386__)  &&  LINUX_VERSION_CODE < 0x020300
+	clear_bit(0, (void*)&dev->interrupt);
+#endif
+	return;
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+   for clarity and better register allocation.
+   Walks the Rx ring from cur_rx, delivering each driver-owned descriptor
+   to the stack.  Refilling the ring is left to the caller. */
+static int netdev_rx(struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	int entry = np->cur_rx % RX_RING_SIZE;
+	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
+	s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
+
+	/* If the driver owns the next entry it's a new packet. Send it up. */
+	/* The sign bit of the signed status word is the DescOwn bit. */
+	while (desc_status < 0) {        /* e.g. & DescOwn */
+		if (np->msg_level & NETIF_MSG_RX_STATUS)
+			printk(KERN_DEBUG "  In netdev_rx() entry %d status was %8.8x.\n",
+				   entry, desc_status);
+		if (--boguscnt < 0)
+			break;
+		if ((desc_status & (DescMore|DescPktOK|RxTooLong)) != DescPktOK) {
+			if (desc_status & DescMore) {
+				/* Frame spilled into more than one buffer: we do not
+				   support scatter Rx, so count it as a length error. */
+				printk(KERN_WARNING "%s: Oversized(?) Ethernet frame spanned "
+					   "multiple buffers, entry %#x status %x.\n",
+					   dev->name, np->cur_rx, desc_status);
+				np->stats.rx_length_errors++;
+			} else {
+				/* There was a error. */
+				if (np->msg_level & NETIF_MSG_RX_ERR)
+					printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
+						   desc_status);
+				np->stats.rx_errors++;
+				if (desc_status & 0x06000000) np->stats.rx_over_errors++;
+				if (desc_status & 0x00600000) np->stats.rx_length_errors++;
+				if (desc_status & 0x00140000) np->stats.rx_frame_errors++;
+				if (desc_status & 0x00080000) np->stats.rx_crc_errors++;
+			}
+		} else {
+			struct sk_buff *skb;
+			int pkt_len = (desc_status & 0x0fff) - 4;	/* Omit CRC size. */
+			/* Check if the packet is long enough to accept without copying
+			   to a minimally-sized skbuff. */
+			if (pkt_len < np->rx_copybreak
+				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+				/* Small frame: copy into a fresh skb and leave the ring
+				   buffer in place for reuse. */
+				skb->dev = dev;
+				skb_reserve(skb, 2);	/* 16 byte align the IP header */
+#if HAS_IP_COPYSUM
+				eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+				skb_put(skb, pkt_len);
+#else
+				memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
+					   pkt_len);
+#endif
+			} else {
+				/* Large frame: pass the ring skb up directly; the slot is
+				   refilled later by rx_ring_fill(). */
+				skb_put(skb = np->rx_skbuff[entry], pkt_len);
+				np->rx_skbuff[entry] = NULL;
+			}
+#ifndef final_version				/* Remove after testing. */
+			/* You will want this info for the initial debug. */
+			if (np->msg_level & NETIF_MSG_PKTDATA)
+				printk(KERN_DEBUG "  Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
+					   "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
+					   "%d.%d.%d.%d.\n",
+					   skb->data[0], skb->data[1], skb->data[2], skb->data[3],
+					   skb->data[4], skb->data[5], skb->data[6], skb->data[7],
+					   skb->data[8], skb->data[9], skb->data[10],
+					   skb->data[11], skb->data[12], skb->data[13],
+					   skb->data[14], skb->data[15], skb->data[16],
+					   skb->data[17]);
+#endif
+			skb->protocol = eth_type_trans(skb, dev);
+			/* W/ hardware checksum: skb->ip_summed = CHECKSUM_UNNECESSARY; */
+			netif_rx(skb);
+			dev->last_rx = jiffies;
+			np->stats.rx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+			np->stats.rx_bytes += pkt_len;
+#endif
+		}
+		entry = (++np->cur_rx) % RX_RING_SIZE;
+		np->rx_head_desc = &np->rx_ring[entry];
+		desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
+	}
+
+	/* Refill is now done in the main interrupt loop. */
+	return 0;
+}
+
+/* Handle the uncommon interrupt sources summarized by IntrAbnormalSummary.
+   Called from intr_handler() with 'intr_status' already read and
+   acknowledged; updates link state, statistics and the Tx threshold. */
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+
+	if (intr_status & LinkChange) {
+		int chip_config = readl(ioaddr + ChipConfig);
+		if (np->msg_level & NETIF_MSG_LINK)
+			printk(KERN_NOTICE "%s: Link changed: Autonegotiation advertising"
+				   " %4.4x  partner %4.4x.\n", dev->name,
+				   (int)readl(ioaddr + 0x90), (int)readl(ioaddr + 0x94));
+		if (chip_config & CfgLinkGood)
+			netif_link_up(dev);
+		else
+			netif_link_down(dev);
+		check_duplex(dev);
+	}
+	if (intr_status & StatsMax) {
+		/* A hardware statistics counter is near overflow: fold the
+		   counters into np->stats now. */
+		get_stats(dev);
+	}
+	if (intr_status & IntrTxUnderrun) {
+		/* Increase the Tx threshold, 32 byte units. */
+		if ((np->tx_config & 0x3f) < 62)
+			np->tx_config += 2;			/* +64 bytes */
+		writel(np->tx_config, ioaddr + TxConfig);
+	}
+	if (intr_status & WOLPkt) {
+		int wol_status = readl(ioaddr + WOLCmd);
+		/* Fixed: the original message lacked a terminating newline. */
+		printk(KERN_NOTICE "%s: Link wake-up event %8.8x.\n",
+			   dev->name, wol_status);
+	}
+	/* NOTE(review): the original tested (RxStatusOverrun | RxStatusOverrun),
+	   i.e. the same flag twice.  A second, distinct Rx-overrun flag was
+	   probably intended -- confirm against the chip documentation. */
+	if (intr_status & RxStatusOverrun) {
+		if (np->msg_level & NETIF_MSG_DRV)
+			printk(KERN_ERR "%s: Rx overflow! ns820 %8.8x.\n",
+				   dev->name, intr_status);
+		np->stats.rx_fifo_errors++;
+	}
+	if (intr_status & ~(LinkChange|StatsMax|RxResetDone|TxResetDone|
+						RxStatusOverrun|0xA7ff)) {
+		if (np->msg_level & NETIF_MSG_DRV)
+			printk(KERN_ERR "%s: Something Wicked happened! ns820 %8.8x.\n",
+				   dev->name, intr_status);
+	}
+	/* Hmmmmm, it's not clear how to recover from PCI faults. */
+	if (intr_status & IntrPCIErr) {
+		/* Count the fault against both FIFOs; no recovery is attempted. */
+		np->stats.tx_fifo_errors++;
+		np->stats.rx_fifo_errors++;
+	}
+}
+
+/* Fold the chip's silent-drop counters into the software statistics and
+   return the cumulative statistics structure. */
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+	long ioaddr = dev->base_addr;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	int crc_errs = readl(ioaddr + RxCRCErrs);
+
+	/* All-ones presumably means the card has been removed; skip updates. */
+	if (crc_errs != 0xffffffff) {
+		/* We need not lock this segment of code for SMP.
+		   There is no atomic-add vulnerability for most CPUs,
+		   and statistics are non-critical. */
+		/* The chip only need report frame silently dropped. */
+		np->stats.rx_crc_errors	+= crc_errs;
+		np->stats.rx_missed_errors += readl(ioaddr + RxMissed);
+	}
+
+	return &np->stats;
+}
+
+/* The little-endian AUTODIN II ethernet CRC calculations.
+   A big-endian version is also available.
+   This is slow but compact code.  Do not use this routine for bulk data,
+   use a table-based routine instead.
+   This is common code and should be moved to net/core/crc.c.
+   Chips may use the upper or lower CRC bits, and may reverse and/or invert
+   them.  Select the endian-ness that results in minimal calculations.
+   Note: the conventional final XOR with 0xffffffff is NOT applied here;
+   callers mask out the bits they need directly. */
+static unsigned const ethernet_polynomial_le = 0xedb88320U;
+static inline unsigned ether_crc_le(int length, unsigned char *data)
+{
+	unsigned int crc = 0xffffffff;	/* Initial value. */
+	while(--length >= 0) {
+		unsigned char current_octet = *data++;
+		int bit;
+		/* Process one bit at a time, LSB first. */
+		for (bit = 8; --bit >= 0; current_octet >>= 1) {
+			if ((crc ^ current_octet) & 1) {
+				crc >>= 1;
+				crc ^= ethernet_polynomial_le;
+			} else
+				crc >>= 1;
+		}
+	}
+	return crc;
+}
+
+/* Program the chip's Rx filter from dev->flags and the multicast list:
+   promiscuous, all-multicast, or a hash-filtered multicast table. */
+static void set_rx_mode(struct net_device *dev)
+{
+	long ioaddr = dev->base_addr;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	u8 mc_filter[64];			/* Multicast hash filter */
+	u32 rx_mode;
+
+	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
+		/* Unconditionally log net taps. */
+		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+		rx_mode = AcceptBroadcast | AcceptAllMulticast | AcceptAllPhys
+			| AcceptMyPhys;
+	} else if ((dev->mc_count > np->multicast_filter_limit)
+			   ||  (dev->flags & IFF_ALLMULTI)) {
+		/* Too many addresses to hash (or ALLMULTI): accept all multicast. */
+		rx_mode = AcceptBroadcast | AcceptAllMulticast | AcceptMyPhys;
+	} else {
+		struct dev_mc_list *mclist;
+		int i;
+		memset(mc_filter, 0, sizeof(mc_filter));
+		/* NOTE(review): the index mask 0x7ff allows bit positions up to
+		   2047, but mc_filter[] is only 64 bytes (512 bits), so set_bit()
+		   can write past the end of this stack array.  Either the mask
+		   should match the 512-bit table written below, or the table and
+		   write loop should cover the chip's full hash width -- confirm
+		   the DP83820 filter RAM size before changing. */
+		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+			 i++, mclist = mclist->next) {
+			set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x7ff,
+					mc_filter);
+		}
+		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+		/* Load the hash table into the chip, 16 bits per filter word. */
+		for (i = 0; i < 64; i += 2) {
+			writel(rx_mode + 0x200 + i, ioaddr + RxFilterAddr);
+			writew((mc_filter[i+1]<<8) + mc_filter[i], ioaddr + RxFilterData);
+		}
+	}
+	writel(rx_mode, ioaddr + RxFilterAddr);
+	np->cur_rx_mode = rx_mode;
+}
+
+/* Private ioctl handler: MII register access plus driver parameter
+   get/set.  Two numbers are accepted per MII operation -- presumably the
+   official SIOCGMIIxxx values and older private-range equivalents. */
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	u16 *data = (u16 *)&rq->ifr_data;
+	u32 *data32 = (void *)&rq->ifr_data;
+
+	switch(cmd) {
+	case 0x8947: case 0x89F0:
+		/* SIOCGMIIPHY: Get the address of the PHY in use. */
+		data[0] = 1;			/* This driver always reports PHY 1. */
+		/* Fall Through */
+	case 0x8948: case 0x89F1:
+		/* SIOCGMIIREG: Read the specified MII register. */
+		data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
+		return 0;
+	case 0x8949: case 0x89F2:
+		/* SIOCSMIIREG: Write the specified MII register */
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (data[0] == 1) {
+			u16 miireg = data[1] & 0x1f;
+			u16 value = data[2];
+			/* Shadow writes to the PHY control/advertise registers so
+			   the driver's duplex tracking stays consistent. */
+			switch (miireg) {
+			case 0:
+				/* Check for autonegotiation on or reset. */
+				np->duplex_lock = (value & 0x9000) ? 0 : 1;
+				if (np->duplex_lock)
+					np->full_duplex = (value & 0x0100) ? 1 : 0;
+				break;
+			case 4: np->advertising = value; break;
+			}
+		}
+		mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+		return 0;
+	case SIOCGPARAMS:
+		/* Report the current driver tuning parameters. */
+		data32[0] = np->msg_level;
+		data32[1] = np->multicast_filter_limit;
+		data32[2] = np->max_interrupt_work;
+		data32[3] = np->rx_copybreak;
+		return 0;
+	case SIOCSPARAMS:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		np->msg_level = data32[0];
+		np->multicast_filter_limit = data32[1];
+		np->max_interrupt_work = data32[2];
+		np->rx_copybreak = data32[3];
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/* Shut the interface down: stop the queue and timer, mask interrupts,
+   halt the chip, release the IRQ and free all ring buffers.
+   Always returns 0. */
+static int netdev_close(struct net_device *dev)
+{
+	long ioaddr = dev->base_addr;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	int i;
+
+	netif_stop_tx_queue(dev);
+
+	if (np->msg_level & NETIF_MSG_IFDOWN) {
+		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x "
+			   "Int %2.2x.\n",
+			   dev->name, (int)readl(ioaddr + ChipCmd),
+			   (int)readl(ioaddr + IntrStatus));
+		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
+			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
+	}
+
+	/* We don't want the timer to re-start anything. */
+	del_timer(&np->timer);
+
+	/* Disable interrupts using the mask. */
+	writel(0, ioaddr + IntrMask);
+	writel(0, ioaddr + IntrEnable);
+	writel(2, ioaddr + StatsCtrl);					/* Freeze Stats */
+
+	/* Stop the chip's Tx and Rx processes. */
+	writel(RxOff | TxOff, ioaddr + ChipCmd);
+
+	/* Snapshot the final hardware counters before the chip is stopped. */
+	get_stats(dev);
+
+#ifdef __i386__
+	if (np->msg_level & NETIF_MSG_IFDOWN) {
+		printk("\n"KERN_DEBUG"  Tx ring at %8.8x:\n",
+			   (int)virt_to_bus(np->tx_ring));
+		for (i = 0; i < TX_RING_SIZE; i++)
+			printk(" #%d desc. %8.8x %8.8x.\n",
+				   i, np->tx_ring[i].cmd_status, (u32)np->tx_ring[i].buf_addr);
+		printk("\n"KERN_DEBUG "  Rx ring %8.8x:\n",
+			   (int)virt_to_bus(np->rx_ring));
+		for (i = 0; i < RX_RING_SIZE; i++) {
+			printk(KERN_DEBUG " #%d desc. %8.8x %8.8x\n",
+				   i, np->rx_ring[i].cmd_status, (u32)np->rx_ring[i].buf_addr);
+		}
+	}
+#endif /* __i386__ debugging only */
+
+	free_irq(dev->irq, dev);
+
+	/* Free all the skbuffs in the Rx queue. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		np->rx_ring[i].cmd_status = 0;
+		np->rx_ring[i].buf_addr = 0xBADF00D0; /* An invalid address. */
+		if (np->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+			np->rx_skbuff[i]->free = 1;
+#endif
+			dev_free_skb(np->rx_skbuff[i]);
+		}
+		np->rx_skbuff[i] = 0;
+	}
+	/* And any skbuffs still pending transmission. */
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		if (np->tx_skbuff[i])
+			dev_free_skb(np->tx_skbuff[i]);
+		np->tx_skbuff[i] = 0;
+	}
+
+	/* Power down Xcvr. */
+	writel(CfgXcrOff | readl(ioaddr + ChipConfig), ioaddr + ChipConfig);
+
+	MOD_DEC_USE_COUNT;
+
+	return 0;
+}
+
+/* Power-management / hot-swap callback used by the CardBus shim.
+   Handles attach, suspend, resume and detach events for one device.
+   Always returns 0. */
+static int power_event(void *dev_instance, int event)
+{
+	struct net_device *dev = dev_instance;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+
+	if (np->msg_level & NETIF_MSG_LINK)
+		printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
+	switch(event) {
+	case DRV_ATTACH:
+		MOD_INC_USE_COUNT;
+		break;
+	case DRV_SUSPEND:
+		/* Disable interrupts, freeze stats, stop Tx and Rx. */
+		writel(0, ioaddr + IntrEnable);
+		writel(2, ioaddr + StatsCtrl);
+		writel(RxOff | TxOff, ioaddr + ChipCmd);
+		/* Power down the transceiver. */
+		writel(CfgXcrOff | readl(ioaddr + ChipConfig), ioaddr + ChipConfig);
+		break;
+	case DRV_RESUME:
+		/* This is incomplete: the open() actions should be repeated. */
+		writel(~CfgXcrOff & readl(ioaddr + ChipConfig), ioaddr + ChipConfig);
+		set_rx_mode(dev);
+		writel(np->intr_enable, ioaddr + IntrEnable);
+		writel(1, ioaddr + IntrEnable);
+		writel(RxOn | TxOn, ioaddr + ChipCmd);
+		break;
+	case DRV_DETACH: {
+		struct net_device **devp, **next;
+		if (dev->flags & IFF_UP) {
+			/* Some, but not all, kernel versions close automatically. */
+			dev_close(dev);
+			dev->flags &= ~(IFF_UP|IFF_RUNNING);
+		}
+		unregister_netdev(dev);
+		release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
+		/* Unlink this device from the driver's root_net_dev list. */
+		for (devp = &root_net_dev; *devp; devp = next) {
+			next = &((struct netdev_private *)(*devp)->priv)->next_module;
+			if (*devp == dev) {
+				*devp = *next;
+				break;
+			}
+		}
+		if (np->priv_addr)
+			kfree(np->priv_addr);
+		kfree(dev);
+		MOD_DEC_USE_COUNT;
+		break;
+	}
+	}
+
+	return 0;
+}
+
+
+#ifdef MODULE
+/* Module entry point: announce the driver, then register either with the
+   CardBus framework or the PCI scan code depending on the build. */
+int init_module(void)
+{
+	/* Emit version even if no cards detected. */
+	printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+#ifdef CARDBUS
+	register_driver(&etherdev_ops);
+	return 0;
+#else
+	/* Returns 0 on success or a negative errno from the PCI scan. */
+	return pci_drv_register(&natsemi_drv_id, NULL);
+#endif
+}
+
+/* Module exit point: deregister the driver, then free every device still
+   on the root_net_dev list together with its mapped registers and
+   private data. */
+void cleanup_module(void)
+{
+	struct net_device *next_dev;
+
+#ifdef CARDBUS
+	unregister_driver(&etherdev_ops);
+#else
+	pci_drv_unregister(&natsemi_drv_id);
+#endif
+
+	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+	while (root_net_dev) {
+		struct netdev_private *np = (void *)(root_net_dev->priv);
+		unregister_netdev(root_net_dev);
+		iounmap((char *)root_net_dev->base_addr);
+		next_dev = np->next_module;		/* Save before freeing the dev. */
+		if (np->priv_addr)
+			kfree(np->priv_addr);
+		kfree(root_net_dev);
+		root_net_dev = next_dev;
+	}
+}
+
+#endif  /* MODULE */
+
+/*
+ * Local variables:
+ *  compile-command: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c ns820.c"
+ *  simple-compile-command: "gcc -DMODULE -O6 -c ns820.c"
+ *  c-indent-level: 4
+ *  c-basic-offset: 4
+ *  tab-width: 4
+ * End:
+ */
diff -uNr net/drivers/net/pci-scan.c linux-2.4.20/drivers/net/pci-scan.c
--- net/drivers/net/pci-scan.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.4.20/drivers/net/pci-scan.c	2003-01-14 20:29:34.000000000 -0500
@@ -0,0 +1,658 @@
+/* pci-scan.c: Linux PCI network adapter support code. */
+/*
+	Originally written 1999-2002 by Donald Becker.
+
+	This software may be used and distributed according to the terms
+	of the GNU General Public License (GPL), incorporated herein by
+	reference.  Drivers interacting with these functions are derivative
+	works and thus are covered the GPL.  They must include an explicit
+	GPL notice.
+
+	This code provides common scan and activate functions for PCI network
+	interfaces.
+
+	The author may be reached as becker@scyld.com, or
+	Donald Becker
+	Scyld Computing Corporation
+	410 Severn Ave., Suite 210
+	Annapolis MD 21403
+
+	Other contributors:
+*/
+static const char version[] =
+"pci-scan.c:v1.11 8/31/2002  Donald Becker <becker@scyld.com>"
+" http://www.scyld.com/linux/drivers.html\n";
+
+/* A few user-configurable values that may be modified when a module. */
+
+static int msg_level = 1;		/* 1 normal messages, 0 quiet .. 7 verbose. */
+static int min_pci_latency = 32;
+
+#if ! defined(__KERNEL__)
+#define __KERNEL__ 1
+#endif
+#if !defined(__OPTIMIZE__)
+#warning  You must compile this file with the correct options!
+#warning  See the last lines of the source file.
+#error You must compile this driver with the proper options, including "-O".
+#endif
+
+#if defined(MODULE) && ! defined(EXPORT_SYMTAB)
+#define EXPORT_SYMTAB
+#endif
+
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#include <linux/modversions.h>
+#if LINUX_VERSION_CODE < 0x20500  &&  defined(MODVERSIONS)
+/* Another interface semantics screw-up. */
+#include <linux/module.h>
+#include <linux/modversions.h>
+#else
+#include <linux/modversions.h>
+#include <linux/module.h>
+#endif
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20300
+/* Bogus change in the middle of a "stable" kernel series.
+   Also, in 2.4.7+ slab must come before interrupt.h to avoid breakage. */
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <asm/io.h>
+#include "pci-scan.h"
+#include "kern_compat.h"
+#if defined(CONFIG_APM)  &&  LINUX_VERSION_CODE < 0x20400 
+#include <linux/apm_bios.h>
+#endif
+#ifdef CONFIG_PM
+/* New in 2.4 kernels, pointlessly incompatible with earlier APM. */
+#include <linux/pm.h>
+#endif
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+#if (LINUX_VERSION_CODE < 0x20100)
+#define PCI_CAPABILITY_LIST	0x34	/* Offset of first capability list entry */
+#define PCI_STATUS_CAP_LIST	0x10	/* Support Capability List */
+#define PCI_CAP_ID_PM		0x01	/* Power Management */
+#endif
+
+int (*register_hotswap_hook)(struct drv_id_info *did);
+void (*unregister_hotswap_hook)(struct drv_id_info *did);
+
+#if LINUX_VERSION_CODE > 0x20118  &&  defined(MODULE)
+MODULE_LICENSE("GPL");
+MODULE_PARM(msg_level, "i");
+MODULE_PARM(min_pci_latency, "i");
+MODULE_PARM_DESC(msg_level, "Enable additional status messages (0-7)");
+MODULE_PARM_DESC(min_pci_latency,
+				 "Minimum value for the PCI Latency Timer settings");
+#if defined(EXPORT_SYMTAB)
+EXPORT_SYMBOL_NOVERS(pci_drv_register);
+EXPORT_SYMBOL_NOVERS(pci_drv_unregister);
+EXPORT_SYMBOL_NOVERS(acpi_wake);
+EXPORT_SYMBOL_NOVERS(acpi_set_pwr_state);
+EXPORT_SYMBOL_NOVERS(register_hotswap_hook);
+EXPORT_SYMBOL_NOVERS(unregister_hotswap_hook);
+#endif
+#endif
+
+/* List of registered drivers. */
+static struct drv_id_info *drv_list;
+/* List of detected PCI devices, for APM events. */
+static struct dev_info {
+	struct dev_info *next;			/* Singly-linked list link. */
+	void *dev;						/* Opaque device returned by probe1(). */
+	struct drv_id_info *drv_id;		/* Driver that claimed this device. */
+	int flags;
+} *dev_list;
+
+/*
+  This code is not intended to support every configuration.
+  It is intended to minimize duplicated code by providing the functions
+  needed in almost every PCI driver.
+
+  The "no kitchen sink" policy:
+  Additional features and code will be added to this module only if more
+  than half of the drivers for common hardware would benefit from the feature.
+*/
+
+/*
+  Ideally we would detect and number all cards of a type (e.g. network) in
+  PCI slot order.
+  But that does not work with hot-swap cards, CardBus cards and added drivers.
+  So instead we detect just each chip table in slot order.
+
+  This routine takes a PCI ID table, scans the PCI bus, and calls the
+  associated attach/probe1 routine with the hardware already activated and
+  single I/O or memory address already mapped.
+
+  This routine will later be supplemented with CardBus and hot-swap PCI
+  support using the same table.  Thus the pci_chip_tbl[] should not be
+  marked as __initdata.
+*/
+
+#if LINUX_VERSION_CODE >= 0x20200
+/* Grrrr.. complex abstraction layers with negative benefit. */
+/* Scan the PCI bus for devices matching drv_id's ID table (2.2+ kernels).
+   Each match is mapped, ACPI-woken, bus-master enabled as requested by
+   its pci_flags, and handed to drv_id->probe1().  Returns 0 when at least
+   one card was found or a hot-swap hook was registered, else -ENODEV. */
+int pci_drv_register(struct drv_id_info *drv_id, void *initial_device)
+{
+	int chip_idx, cards_found = 0;
+	struct pci_dev *pdev = NULL;
+	struct pci_id_info *pci_tbl = drv_id->pci_dev_tbl;
+	struct drv_id_info *drv;
+	void *newdev;
+
+
+	/* Ignore a double-register attempt. */
+	for (drv = drv_list; drv; drv = drv->next)
+		if (drv == drv_id)
+			return -EBUSY;
+
+	while ((pdev = pci_find_class(drv_id->pci_class, pdev)) != 0) {
+		u32 pci_id, pci_subsys_id, pci_class_rev;
+		u16 pci_command, new_command;
+		int pci_flags;
+		long pciaddr;			/* Bus address. */
+		long ioaddr;			/* Mapped address for this processor. */
+
+		pci_read_config_dword(pdev, PCI_VENDOR_ID, &pci_id);
+		/* Offset 0x2c is PCI_SUBSYSTEM_ID aka PCI_SUBSYSTEM_VENDOR_ID. */
+		pci_read_config_dword(pdev, 0x2c, &pci_subsys_id);
+		pci_read_config_dword(pdev, PCI_REVISION_ID, &pci_class_rev);
+
+		if (msg_level > 3)
+			printk(KERN_DEBUG "PCI ID %8.8x subsystem ID is %8.8x.\n",
+				   pci_id, pci_subsys_id);
+		/* Match vendor/device, subsystem and revision under each entry's
+		   masks; the table is terminated by a NULL name. */
+		for (chip_idx = 0; pci_tbl[chip_idx].name; chip_idx++) {
+			struct pci_id_info *chip = &pci_tbl[chip_idx];
+			if ((pci_id & chip->id.pci_mask) == chip->id.pci
+				&& (pci_subsys_id&chip->id.subsystem_mask) == chip->id.subsystem
+				&& (pci_class_rev&chip->id.revision_mask) == chip->id.revision)
+				break;
+		}
+		if (pci_tbl[chip_idx].name == 0) 		/* Compiled out! */
+			continue;
+
+		pci_flags = pci_tbl[chip_idx].pci_flags;
+		/* Bits 4-6 of pci_flags select which BAR holds the registers. */
+#if LINUX_VERSION_CODE >= 0x2030C
+		/* Wow. A oversized, hard-to-use abstraction. Bogus. */
+		pciaddr = pdev->resource[(pci_flags >> 4) & 7].start;
+#else
+		pciaddr = pdev->base_address[(pci_flags >> 4) & 7];
+#if defined(__alpha__)			/* Really any machine with 64 bit addressing. */
+		if (pci_flags & PCI_ADDR_64BITS)
+			pciaddr |= ((long)pdev->base_address[((pci_flags>>4)&7)+ 1]) << 32;
+#endif
+#endif
+		if (msg_level > 2)
+			printk(KERN_INFO "Found %s at PCI address %#lx, mapped IRQ %d.\n",
+				   pci_tbl[chip_idx].name, pciaddr, pdev->irq);
+
+		/* Skip devices with no usable IRQ (0 or 255) unless the driver
+		   declares it does not need one. */
+		if ( ! (pci_flags & PCI_UNUSED_IRQ)  &&
+			 (pdev->irq == 0 || pdev->irq == 255)) {
+			if (pdev->bus->number == 32) 	/* Broken CardBus activation. */
+				printk(KERN_WARNING "Resources for CardBus device '%s' have"
+					   " not been allocated.\n"
+					   KERN_WARNING "Activation has been delayed.\n",
+					   pci_tbl[chip_idx].name);
+			else
+				printk(KERN_WARNING "PCI device '%s' was not assigned an "
+					   "IRQ.\n"
+					   KERN_WARNING "It will not be activated.\n",
+				   pci_tbl[chip_idx].name);
+			continue;
+		}
+		/* Claim the I/O region, or map the memory BAR, as appropriate. */
+		if ((pci_flags & PCI_BASE_ADDRESS_SPACE_IO)) {
+			ioaddr = pciaddr & PCI_BASE_ADDRESS_IO_MASK;
+			if (check_region(ioaddr, pci_tbl[chip_idx].io_size))
+				continue;
+		} else if ((ioaddr = (long)ioremap(pciaddr & PCI_BASE_ADDRESS_MEM_MASK,
+										   pci_tbl[chip_idx].io_size)) == 0) {
+			printk(KERN_INFO "Failed to map PCI address %#lx for device "
+				   "'%s'.\n", pciaddr, pci_tbl[chip_idx].name);
+			continue;
+		}
+		if ( ! (pci_flags & PCI_NO_ACPI_WAKE))
+			acpi_wake(pdev);
+		/* Low three pci_flags bits are ORed into PCI_COMMAND. */
+		pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+		new_command = pci_command | (pci_flags & 7);
+		if (pci_command != new_command) {
+			printk(KERN_INFO "  The PCI BIOS has not enabled the"
+				   " device at %d/%d!  Updating PCI command %4.4x->%4.4x.\n",
+				   pdev->bus->number, pdev->devfn, pci_command, new_command);
+			pci_write_config_word(pdev, PCI_COMMAND, new_command);
+		}
+
+		newdev = drv_id->probe1(pdev, initial_device,
+								ioaddr, pdev->irq, chip_idx, cards_found);
+		if (newdev == NULL)
+			continue;
+		initial_device = 0;		/* Only the first probe gets it. */
+		cards_found++;
+		if (pci_flags & PCI_COMMAND_MASTER) {
+			pci_set_master(pdev);
+			if ( ! (pci_flags & PCI_NO_MIN_LATENCY)) {
+				u8 pci_latency;
+				pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
+				if (pci_latency < min_pci_latency) {
+					printk(KERN_INFO "  PCI latency timer (CFLT) is "
+						   "unreasonably low at %d.  Setting to %d clocks.\n",
+						   pci_latency, min_pci_latency);
+					pci_write_config_byte(pdev, PCI_LATENCY_TIMER,
+										  min_pci_latency);
+				}
+			}
+		}
+		{
+			/* Record the device on dev_list for later power events. */
+			struct dev_info *devp =
+				kmalloc(sizeof(struct dev_info), GFP_KERNEL);
+			if (devp == 0)
+				continue;
+			devp->next = dev_list;
+			devp->dev = newdev;
+			devp->drv_id = drv_id;
+			dev_list = devp;
+		}
+	}
+
+	if (((drv_id->flags & PCI_HOTSWAP)
+		 && register_hotswap_hook && (*register_hotswap_hook)(drv_id) == 0)
+		|| cards_found) {
+		MOD_INC_USE_COUNT;
+		drv_id->next = drv_list;
+		drv_list = drv_id;
+		return 0;
+	} else
+		return -ENODEV;
+}
+#else
+/* Pre-2.2 kernel variant of pci_drv_register(): same contract as the
+   version above, but built on the pcibios_* BIOS-call interface. */
+int pci_drv_register(struct drv_id_info *drv_id, void *initial_device)
+{
+	int pci_index, cards_found = 0;
+	unsigned char pci_bus, pci_device_fn;
+	struct pci_dev *pdev;
+	struct pci_id_info *pci_tbl = drv_id->pci_dev_tbl;
+	void *newdev;
+
+	if ( ! pcibios_present())
+		return -ENODEV;
+
+	/* Enumerate by class, one device per pci_index. */
+	for (pci_index = 0; pci_index < 0xff; pci_index++) {
+		u32 pci_id, subsys_id, pci_class_rev;
+		u16 pci_command, new_command;
+		int chip_idx, irq, pci_flags;
+		long pciaddr;
+		long ioaddr;
+		u32 pci_busaddr;
+		u8 pci_irq_line;
+
+		if (pcibios_find_class (drv_id->pci_class, pci_index,
+								&pci_bus, &pci_device_fn)
+			!= PCIBIOS_SUCCESSFUL)
+			break;
+		pcibios_read_config_dword(pci_bus, pci_device_fn,
+								  PCI_VENDOR_ID, &pci_id);
+		/* Offset 0x2c is PCI_SUBSYSTEM_ID aka PCI_SUBSYSTEM_VENDOR_ID. */
+		pcibios_read_config_dword(pci_bus, pci_device_fn, 0x2c, &subsys_id);
+		pcibios_read_config_dword(pci_bus, pci_device_fn,
+								  PCI_REVISION_ID, &pci_class_rev);
+
+		/* Match against the driver's ID table (NULL name terminates). */
+		for (chip_idx = 0; pci_tbl[chip_idx].name; chip_idx++) {
+			struct pci_id_info *chip = &pci_tbl[chip_idx];
+			if ((pci_id & chip->id.pci_mask) == chip->id.pci
+				&& (subsys_id & chip->id.subsystem_mask) == chip->id.subsystem
+				&& (pci_class_rev&chip->id.revision_mask) == chip->id.revision)
+				break;
+		}
+		if (pci_tbl[chip_idx].name == 0) 		/* Compiled out! */
+			continue;
+
+		pci_flags = pci_tbl[chip_idx].pci_flags;
+		pdev = pci_find_slot(pci_bus, pci_device_fn);
+		pcibios_read_config_byte(pci_bus, pci_device_fn,
+								 PCI_INTERRUPT_LINE, &pci_irq_line);
+		irq = pci_irq_line;
+		/* Read the BAR selected by bits 4-6 of pci_flags. */
+		pcibios_read_config_dword(pci_bus, pci_device_fn,
+								  ((pci_flags >> 2) & 0x1C) + 0x10,
+								  &pci_busaddr);
+		pciaddr = pci_busaddr;
+#if defined(__alpha__)
+		if (pci_flags & PCI_ADDR_64BITS) {
+			/* 64 bit BAR: combine with the following dword. */
+			pcibios_read_config_dword(pci_bus, pci_device_fn,
+									  ((pci_flags >> 2) & 0x1C) + 0x14,
+									  &pci_busaddr);
+			pciaddr |= ((long)pci_busaddr)<<32;
+		}
+#endif
+
+		if (msg_level > 2)
+			printk(KERN_INFO "Found %s at PCI address %#lx, IRQ %d.\n",
+				   pci_tbl[chip_idx].name, pciaddr, irq);
+
+		/* Skip devices with no usable IRQ (0 or 255) unless the driver
+		   declares it does not need one. */
+		if ( ! (pci_flags & PCI_UNUSED_IRQ)  &&
+			 (irq == 0 || irq == 255)) {
+			if (pci_bus == 32) 	/* Broken CardBus activation. */
+				printk(KERN_WARNING "Resources for CardBus device '%s' have"
+					   " not been allocated.\n"
+					   KERN_WARNING "It will not be activated.\n",
+					   pci_tbl[chip_idx].name);
+			else
+				printk(KERN_WARNING "PCI device '%s' was not assigned an "
+					   "IRQ.\n"
+					   KERN_WARNING "It will not be activated.\n",
+				   pci_tbl[chip_idx].name);
+			continue;
+		}
+
+		/* Claim the I/O region, or map the memory BAR, as appropriate. */
+		if ((pciaddr & PCI_BASE_ADDRESS_SPACE_IO)) {
+			ioaddr = pciaddr & PCI_BASE_ADDRESS_IO_MASK;
+			if (check_region(ioaddr, pci_tbl[chip_idx].io_size))
+				continue;
+		} else if ((ioaddr = (long)ioremap(pciaddr & PCI_BASE_ADDRESS_MEM_MASK,
+										   pci_tbl[chip_idx].io_size)) == 0) {
+			printk(KERN_INFO "Failed to map PCI address %#lx.\n",
+				   pciaddr);
+			continue;
+		}
+
+		if ( ! (pci_flags & PCI_NO_ACPI_WAKE))
+			acpi_wake(pdev);
+		/* Low three pci_flags bits are ORed into PCI_COMMAND. */
+		pcibios_read_config_word(pci_bus, pci_device_fn,
+								 PCI_COMMAND, &pci_command);
+		new_command = pci_command | (pci_flags & 7);
+		if (pci_command != new_command) {
+			printk(KERN_INFO "  The PCI BIOS has not enabled the"
+				   " device at %d/%d!  Updating PCI command %4.4x->%4.4x.\n",
+				   pci_bus, pci_device_fn, pci_command, new_command);
+			pcibios_write_config_word(pci_bus, pci_device_fn,
+									  PCI_COMMAND, new_command);
+		}
+
+		newdev = drv_id->probe1(pdev, initial_device,
+							   ioaddr, irq, chip_idx, cards_found);
+
+		if (newdev  && (pci_flags & PCI_COMMAND_MASTER)  &&
+			! (pci_flags & PCI_NO_MIN_LATENCY)) {
+			u8 pci_latency;
+			pcibios_read_config_byte(pci_bus, pci_device_fn,
+									 PCI_LATENCY_TIMER, &pci_latency);
+			if (pci_latency < min_pci_latency) {
+				printk(KERN_INFO "  PCI latency timer (CFLT) is "
+					   "unreasonably low at %d.  Setting to %d clocks.\n",
+					   pci_latency, min_pci_latency);
+				pcibios_write_config_byte(pci_bus, pci_device_fn,
+										  PCI_LATENCY_TIMER, min_pci_latency);
+			}
+		}
+		if (newdev) {
+			/* Record the device on dev_list for later power events. */
+			struct dev_info *devp =
+				kmalloc(sizeof(struct dev_info), GFP_KERNEL);
+			if (devp) {
+				devp->next = dev_list;
+				devp->dev = newdev;
+				devp->drv_id = drv_id;
+				dev_list = devp;
+			}
+		}
+		initial_device = 0;		/* Only the first probe gets it. */
+		cards_found++;
+	}
+
+	if (((drv_id->flags & PCI_HOTSWAP)
+		 && register_hotswap_hook && (*register_hotswap_hook)(drv_id) == 0)
+		|| cards_found) {
+		MOD_INC_USE_COUNT;
+		drv_id->next = drv_list;
+		drv_list = drv_id;
+		return 0;
+	} else
+		/* Reached only when cards_found == 0, so this always yields
+		   -ENODEV; the conditional is redundant but harmless. */
+		return cards_found ? 0 : -ENODEV;
+}
+#endif
+
+void pci_drv_unregister(struct drv_id_info *drv_id)
+{
+	struct drv_id_info **drvp;
+	struct dev_info **devip = &dev_list;
+
+	if (unregister_hotswap_hook)
+		(*unregister_hotswap_hook)(drv_id);
+
+	for (drvp = &drv_list; *drvp; drvp = &(*drvp)->next)
+		if (*drvp == drv_id) {
+			*drvp = (*drvp)->next;
+			MOD_DEC_USE_COUNT;
+			break;
+		}
+	while (*devip) {
+		struct dev_info *thisdevi = *devip;
+		if (thisdevi->drv_id == drv_id) {
+			*devip = thisdevi->next;
+			kfree(thisdevi);
+		} else
+			devip = &(*devip)->next;
+	}
+
+	return;
+}
+
+#if LINUX_VERSION_CODE < 0x20400
+/*
+  Search PCI configuration space for the specified capability registers.
+  Return the index, or 0 on failure.
+  The 2.4 kernel now includes this function.
+*/
+int pci_find_capability(struct pci_dev *pdev, int findtype)
+{
+	u16 pci_status, cap_type;
+	u8 pci_cap_idx;
+	int cap_idx;
+
+	pci_read_config_word(pdev, PCI_STATUS, &pci_status);
+	if ( ! (pci_status & PCI_STATUS_CAP_LIST))
+		return 0;
+	pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pci_cap_idx);
+	cap_idx = pci_cap_idx;
+	for (cap_idx = pci_cap_idx; cap_idx; cap_idx = (cap_type >> 8) & 0xff) {
+		pci_read_config_word(pdev, cap_idx, &cap_type);
+		if ((cap_type & 0xff) == findtype)
+			return cap_idx;
+	}
+	return 0;
+}
+#endif
+
+/* Change a device from D3 (sleep) to D0 (active).
+   Return the old power state.
+   This is more complicated than you might first expect since most cards
+   forget all PCI config info during the transition! */
+int acpi_wake(struct pci_dev *pdev)
+{
+	u32 base[5], romaddr;
+	u16 pci_command, pwr_command;
+	u8  pci_latency, pci_cacheline, irq;
+	int i, pwr_cmd_idx = pci_find_capability(pdev, PCI_CAP_ID_PM);
+
+	if (pwr_cmd_idx == 0)
+		return 0;
+	pci_read_config_word(pdev, pwr_cmd_idx + 4, &pwr_command);
+	if ((pwr_command & 3) == 0)
+		return 0;
+	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+	for (i = 0; i < 5; i++)
+		pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0 + i*4,
+								  &base[i]);
+	pci_read_config_dword(pdev, PCI_ROM_ADDRESS, &romaddr);
+	pci_read_config_byte( pdev, PCI_LATENCY_TIMER, &pci_latency);
+	pci_read_config_byte( pdev, PCI_CACHE_LINE_SIZE, &pci_cacheline);
+	pci_read_config_byte( pdev, PCI_INTERRUPT_LINE, &irq);
+
+	pci_write_config_word(pdev, pwr_cmd_idx + 4, 0x0000);
+	for (i = 0; i < 5; i++)
+		if (base[i])
+			pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0 + i*4,
+									   base[i]);
+	pci_write_config_dword(pdev, PCI_ROM_ADDRESS, romaddr);
+	pci_write_config_byte( pdev, PCI_INTERRUPT_LINE, irq);
+	pci_write_config_byte( pdev, PCI_CACHE_LINE_SIZE, pci_cacheline);
+	pci_write_config_byte( pdev, PCI_LATENCY_TIMER, pci_latency);
+	pci_write_config_word( pdev, PCI_COMMAND, pci_command | 5);
+	return pwr_command & 3;
+}
+
+int acpi_set_pwr_state(struct pci_dev *pdev, enum acpi_pwr_state new_state)
+{
+	u16 pwr_command;
+	int pwr_cmd_idx = pci_find_capability(pdev, PCI_CAP_ID_PM);
+
+	if (pwr_cmd_idx == 0)
+		return 0;
+	pci_read_config_word(pdev, pwr_cmd_idx + 4, &pwr_command);
+	if ((pwr_command & 3) == ACPI_D3  &&  new_state != ACPI_D3)
+		acpi_wake(pdev);		/* The complicated sequence. */
+	pci_write_config_word(pdev, pwr_cmd_idx + 4,
+							  (pwr_command & ~3) | new_state);
+	return pwr_command & 3;
+}
+
+#if defined(CONFIG_PM)
+static int handle_pm_event(struct pm_dev *dev, int event, void *data)
+{
+	static int down = 0;
+	struct dev_info *devi;
+	int pwr_cmd = -1;
+
+	if (msg_level > 1)
+		printk(KERN_DEBUG "pci-scan: Handling power event %d for driver "
+			   "list %s...\n",
+			   event, drv_list->name);
+	switch (event) {
+	case PM_SUSPEND:
+		if (down) {
+			printk(KERN_DEBUG "pci-scan: Received extra suspend event\n");
+			break;
+		}
+		down = 1;
+		for (devi = dev_list; devi; devi = devi->next)
+			if (devi->drv_id->pwr_event)
+				devi->drv_id->pwr_event(devi->dev, DRV_SUSPEND);
+		break;
+	case PM_RESUME:
+		if (!down) {
+			printk(KERN_DEBUG "pci-scan: Received bogus resume event\n");
+			break;
+		}
+		for (devi = dev_list; devi; devi = devi->next) {
+			if (devi->drv_id->pwr_event) {
+				if (msg_level > 3)
+					printk(KERN_DEBUG "pci-scan: Calling resume for %s "
+						   "device.\n", devi->drv_id->name);
+				devi->drv_id->pwr_event(devi->dev, DRV_RESUME);
+			}
+		}
+		down = 0;
+		break;
+	case PM_SET_WAKEUP: pwr_cmd = DRV_PWR_WakeOn; break;
+	case PM_EJECT:		pwr_cmd = DRV_DETACH;	break;
+	default:
+		printk(KERN_DEBUG "pci-scan: Unknown power management event %d.\n",
+			   event);
+	}
+	if (pwr_cmd >= 0)
+		for (devi = dev_list; devi; devi = devi->next)
+			if (devi->drv_id->pwr_event)
+				devi->drv_id->pwr_event(devi->dev, pwr_cmd);
+
+	return 0;
+}
+
+#elif defined(CONFIG_APM)  &&  LINUX_VERSION_CODE < 0x20400 
+static int handle_apm_event(apm_event_t event)
+{
+	static int down = 0;
+	struct dev_info *devi;
+
+	if (msg_level > 1)
+		printk(KERN_DEBUG "pci-scan: Handling APM event %d for driver "
+			   "list %s...\n",
+			   event, drv_list->name);
+	return 0;
+	switch (event) {
+	case APM_SYS_SUSPEND:
+	case APM_USER_SUSPEND:
+		if (down) {
+			printk(KERN_DEBUG "pci-scan: Received extra suspend event\n");
+			break;
+		}
+		down = 1;
+		for (devi = dev_list; devi; devi = devi->next)
+			if (devi->drv_id->pwr_event)
+				devi->drv_id->pwr_event(devi->dev, DRV_SUSPEND);
+		break;
+	case APM_NORMAL_RESUME:
+	case APM_CRITICAL_RESUME:
+		if (!down) {
+			printk(KERN_DEBUG "pci-scan: Received bogus resume event\n");
+			break;
+		}
+		for (devi = dev_list; devi; devi = devi->next)
+			if (devi->drv_id->pwr_event)
+				devi->drv_id->pwr_event(devi->dev, DRV_RESUME);
+		down = 0;
+		break;
+	}
+	return 0;
+}
+#endif /* CONFIG_APM */
+
+#ifdef MODULE
+int init_module(void)
+{
+	if (msg_level)	/* Emit version even if no cards detected. */
+		printk(KERN_INFO "%s", version);
+
+#if defined(CONFIG_PM)
+	pm_register(PM_PCI_DEV, 0, &handle_pm_event);
+#elif defined(CONFIG_APM)  &&  LINUX_VERSION_CODE < 0x20400 
+	apm_register_callback(&handle_apm_event);
+#endif
+	return 0;
+}
+void cleanup_module(void)
+{
+#if defined(CONFIG_PM)
+	pm_unregister_all(&handle_pm_event);
+#elif defined(CONFIG_APM)  &&  LINUX_VERSION_CODE < 0x20400 
+	apm_unregister_callback(&handle_apm_event);
+#endif
+	if (dev_list != NULL)
+		printk(KERN_WARNING "pci-scan: Unfreed device references.\n");
+	return;
+}
+#endif
+
+
+/*
+ * Local variables:
+ *  compile-command: "gcc -DMODULE -D__KERNEL__ -DEXPORT_SYMTAB -Wall -Wstrict-prototypes -O6 -c pci-scan.c"
+ *  c-indent-level: 4
+ *  c-basic-offset: 4
+ *  tab-width: 4
+ * End:
+ */
diff -uNr net/drivers/net/pci-scan.h linux-2.4.20/drivers/net/pci-scan.h
--- net/drivers/net/pci-scan.h	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.4.20/drivers/net/pci-scan.h	2003-01-14 20:29:34.000000000 -0500
@@ -0,0 +1,90 @@
+#ifndef _PCI_SCAN_H
+#define _PCI_SCAN_H
+/*
+  version 1.02 $Version:$ $Date: 2001/03/18 21:35:59 $
+   Copyright 1999-2001 Donald Becker / Scyld Computing Corporation
+   This software is part of the Linux kernel.  It may be used and
+   distributed according to the terms of the GNU Public License,
+   incorporated herein by reference.
+*/
+
+/*
+  These are the structures in the table that drives the PCI probe routines.
+  Note the matching code uses a bitmask: more specific table entries should
+  be placed before "catch-all" entries.
+
+  The table must be zero terminated.
+*/
+enum pci_id_flags_bits {
+	/* Set PCI command register bits before calling probe1(). */
+	PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
+	/* Read and map the single following PCI BAR. */
+	PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
+	PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
+	PCI_UNUSED_IRQ=0x800,
+};
+
+struct pci_id_info {
+	const char *name;
+	struct match_info {
+		int	pci, pci_mask, subsystem, subsystem_mask;
+		int revision, revision_mask; 				/* Only 8 bits. */
+	} id;
+	enum pci_id_flags_bits pci_flags;
+	int io_size;				/* Needed for I/O region check or ioremap(). */
+	int drv_flags;				/* Driver use, intended as capability flags. */
+};
+
+enum drv_id_flags {
+	PCI_HOTSWAP=1, /* Leave module loaded for Cardbus-like chips. */
+};
+enum drv_pwr_action {
+	DRV_NOOP,			/* No action. */
+	DRV_ATTACH,			/* The driver may expect power ops. */
+	DRV_SUSPEND,		/* Machine suspending, next event RESUME or DETACH. */
+	DRV_RESUME,			/* Resume from previous SUSPEND  */
+	DRV_DETACH,			/* Card will-be/is gone. Valid from SUSPEND! */
+	DRV_PWR_WakeOn,		/* Put device in e.g. Wake-On-LAN mode. */
+	DRV_PWR_DOWN,		/* Go to lowest power mode. */
+	DRV_PWR_UP,			/* Go to normal power mode. */
+};
+
+struct drv_id_info {
+	const char *name;			/* Single-word driver name. */
+	int flags;
+	int pci_class;				/* Typically PCI_CLASS_NETWORK_ETHERNET<<8. */
+	struct pci_id_info *pci_dev_tbl;
+	void *(*probe1)(struct pci_dev *pdev, void *dev_ptr,
+					long ioaddr, int irq, int table_idx, int fnd_cnt);
+	/* Optional, called for suspend, resume and detach. */
+	int (*pwr_event)(void *dev, int event);
+	/* Internal values. */
+	struct drv_id_info *next;
+	void *cb_ops;
+};
+
+/*  PCI scan and activate.
+	Scan PCI-like hardware, calling probe1(..,dev,..) on devices that match.
+	Returns -ENODEV, a negative number, if no cards are found. */
+
+extern int pci_drv_register(struct drv_id_info *drv_id, void *initial_device);
+extern void pci_drv_unregister(struct drv_id_info *drv_id);
+
+
+/*  ACPI routines.
+	Wake (change to ACPI D0 state) or set the ACPI power level of a sleeping
+	ACPI device.  Returns the old power state.  */
+
+int acpi_wake(struct pci_dev *pdev);
+enum  acpi_pwr_state {ACPI_D0, ACPI_D1, ACPI_D2, ACPI_D3};
+int acpi_set_pwr_state(struct pci_dev *pdev, enum acpi_pwr_state state);
+
+
+/*
+ * Local variables:
+ *  c-indent-level: 4
+ *  c-basic-offset: 4
+ *  tab-width: 4
+ * End:
+ */
+#endif
diff -uNr net/drivers/net/pci-serial.c linux-2.4.20/drivers/net/pci-serial.c
--- net/drivers/net/pci-serial.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.4.20/drivers/net/pci-serial.c	2003-01-14 20:29:34.000000000 -0500
@@ -0,0 +1,258 @@
+/* pci-serial.c: A PCI serial port (e.g. modem) activator for Linux. */
+/*
+	This driver is an activator for PCI serial devices.
+
+	Written/copyright 1999-2002 by Donald Becker.
+
+	This software may be used and distributed according to the terms of
+	the GNU General Public License (GPL), incorporated herein by reference.
+	Drivers based on or derived from this code fall under the GPL and must
+	retain the authorship, copyright and license notice.  This file is not
+	a complete program and may only be used when the entire operating
+	system is licensed under the GPL.
+
+	The author may be reached as becker@scyld.com, or C/O
+	Scyld Computing Corporation
+	410 Severn Ave., Suite 210
+	Annapolis MD 21403
+
+	Support information and updates available at
+	http://www.scyld.com/network/updates.html
+*/
+
+static const char *version =
+"pci-serial.c:v1.03 7/30/2002 Donald Becker http://www.scyld.com/index.html\n";
+
+/* A few user-configurable values. */
+
+/* Message enable level: 0..31 = no..all messages.  See NETIF_MSG docs. */
+static int debug = 1;
+
+/* Operational parameters that usually are not changed. */
+
+#if !defined(__OPTIMIZE__)  ||  !defined(__KERNEL__)
+#warning  You must compile this file with the correct options!
+#warning  See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+#include <linux/config.h>
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/pci.h>
+#if LINUX_VERSION_CODE < 0x20155
+#include <linux/bios32.h>
+#define PCI_SUPPORT 1
+#else
+#define PCI_SUPPORT 2
+#endif
+#include <linux/major.h>
+#include <linux/serial.h>
+
+#include <asm/io.h>
+#include "kern_compat.h"
+
+#if ! defined (LINUX_VERSION_CODE) || LINUX_VERSION_CODE < 0x20000
+#warning This driver version is only for kernel versions 2.0.0 and later.
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
+MODULE_DESCRIPTION("PCI hot-swap serial port activator");
+MODULE_LICENSE("GPL");
+MODULE_PARM(debug, "i");
+MODULE_PARM_DESC(debug, "Driver message level (0-31)");
+
+#if LINUX_VERSION_CODE < 0x20123
+#define test_and_set_bit(val, addr) set_bit(val, addr)
+#endif
+#if LINUX_VERSION_CODE < 0x20155
+#define PCI_SUPPORT_VER1
+#define pci_present pcibios_present
+#endif
+
+/*
+				Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for PCI serial ports.
+
+
+II. Board-specific settings
+
+N/A
+
+III. Operation
+
+IVb. References
+
+IVc. Errata
+
+*/
+
+/* The rest of these values should never change. */
+
+static struct cb_serial_info {
+	struct cb_serial_info *next;
+	long ioaddr;
+	int major, minor;
+	char dev_name[8];
+	u32 subsystem_id;
+	u8 pci_bus, pci_devfn, irq;
+} *cb_serial_list;
+
+int serial_attach(int bus, int devfn)
+{
+    struct serial_struct serial;
+    int line;
+	u16 device_id, vendor_id, pci_cmd;
+	u32 addr0, subsystem_id, pwr_cmd;
+	u8 irq;
+	long ioaddr;
+
+	if (debug) {
+		printk(KERN_INFO "serial_attach(bus %d, function %d).\n",
+			   bus, devfn);
+	}
+	pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, &addr0);
+	if ( ! (addr0 & 1))
+		pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_1, &addr0);
+	pcibios_read_config_byte(bus, devfn, PCI_INTERRUPT_LINE, &irq);
+	pcibios_read_config_word(bus, devfn, PCI_VENDOR_ID, &vendor_id);
+	pcibios_read_config_word(bus, devfn, PCI_DEVICE_ID, &device_id);
+	pcibios_read_config_dword(bus, devfn, PCI_SUBSYSTEM_ID, &subsystem_id);
+	pcibios_read_config_dword(bus, devfn, 0x44, &pwr_cmd);
+	pcibios_write_config_dword(bus, devfn, 0x44, pwr_cmd & ~3);
+	pcibios_read_config_word(bus, devfn, PCI_COMMAND, &pci_cmd);
+	ioaddr = addr0 & ~3;
+	if (ioaddr == 0 || irq == 0) {
+		printk(KERN_ERR "A CardBus serial port was not assigned an %s.\n",
+			   ioaddr == 0 ? "I/O address" : "IRQ");
+		return 0;
+	}
+	if (debug > 1) {
+		printk(KERN_INFO " PCI command register was %4.4x.\n", pci_cmd);
+		printk(KERN_INFO "serial_attach(bus %d, function %d), device %4.4x "
+			   "IRQ %d IO %lx subsystem ID %8.8x.\n", bus, devfn, device_id,
+			   irq, ioaddr, subsystem_id);
+	}
+	/* Insert vendor-specific magic here. */
+	serial.port = ioaddr;
+    serial.irq = irq;
+    serial.flags = ASYNC_SHARE_IRQ;
+    line = register_serial(&serial);
+
+	if (debug > 2) {
+		int i;
+		printk(KERN_DEBUG "pci-serial: Register dump at %#lx:", ioaddr);
+		for (i = 0; i < 8; i++)
+			printk(" %2.2x", inb(ioaddr + i));
+		printk(".\n");
+	}
+
+	if (line < 0) {
+		printk(KERN_NOTICE "serial_cb: register_serial() at 0x%04x, "
+			   "irq %d failed, status %d\n", serial.port, serial.irq, line);
+	} else {
+		struct cb_serial_info *info =
+			kmalloc(sizeof(struct cb_serial_info), GFP_KERNEL);
+		memset(info, 0, sizeof(struct cb_serial_info));
+		sprintf(info->dev_name, "ttyS%d", line);
+		info->major = TTY_MAJOR;
+		info->minor = 0x40 + line;
+		info->pci_bus = bus;
+		info->pci_devfn = devfn;
+		info->ioaddr = ioaddr;
+		info->subsystem_id = subsystem_id;
+		info->next = cb_serial_list;
+		cb_serial_list = info;
+		MOD_INC_USE_COUNT;
+		return 1;
+	}
+	return 0;
+}
+
+static void serial_detach(void)
+{
+	struct cb_serial_info *info, **infop;
+	if (debug)
+		printk(KERN_INFO "serial_detach()\n");
+	for (infop = &cb_serial_list; *infop; *infop = (*infop)->next)
+		if (1)
+			break;
+	info = *infop;
+	if (info == NULL)
+		return;
+#if 0
+	unregister_serial(node->minor - 0x40);
+#endif
+	*infop = info->next;
+	kfree(info);
+	MOD_DEC_USE_COUNT;
+	if (debug)
+		printk(KERN_INFO "serial_detach() done.\n");
+}
+
+
+#ifdef MODULE
+
+int init_module(void)
+{
+	int cards_found = 0;
+	int pci_index;
+	unsigned char pci_bus, pci_device_fn;
+
+	printk(KERN_INFO "%s", version);
+	
+	if ( ! pcibios_present())
+		return -ENODEV;
+
+	for (pci_index = 0; pci_index < 0xff; pci_index++) {
+		if (pcibios_find_class (PCI_CLASS_COMMUNICATION_OTHER << 8, pci_index,
+								&pci_bus, &pci_device_fn)
+			!= PCIBIOS_SUCCESSFUL)
+			break;
+		cards_found++;
+		serial_attach(pci_bus, pci_device_fn);
+	}
+	for (pci_index = 0; pci_index < 0xff; pci_index++) {
+		if (pcibios_find_class((PCI_CLASS_COMMUNICATION_SERIAL <<8) | 0x02,
+							   pci_index, &pci_bus, &pci_device_fn)
+			!= PCIBIOS_SUCCESSFUL)
+			break;
+		cards_found++;
+		serial_attach(pci_bus, pci_device_fn);
+	}
+	return cards_found ? 0 : -ENODEV;
+}
+
+void cleanup_module(void)
+{
+	return;
+}
+
+#endif  /* MODULE */
+
+/*
+ * Local variables:
+ *  compile-command: "make pci-serial.o"
+ *  alt-compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c pci-serial.c"
+ *  c-indent-level: 4
+ *  c-basic-offset: 4
+ *  tab-width: 4
+ * End:
+ */
diff -uNr net/drivers/net/pci-skeleton.c linux-2.4.20/drivers/net/pci-skeleton.c
--- net/drivers/net/pci-skeleton.c	2003-01-14 20:28:46.000000000 -0500
+++ linux-2.4.20/drivers/net/pci-skeleton.c	2003-01-14 20:29:35.000000000 -0500
@@ -1,1983 +1,1681 @@
+/* pci-skeleton.c: A Linux PCI network adapter skeleton device driver. */
 /*
+	Written 1998-2002 by Donald Becker.
 
-	drivers/net/pci-skeleton.c
+	This software may be used and distributed according to the terms of
+	the GNU General Public License (GPL), incorporated herein by reference.
+	Drivers based on or derived from this code fall under the GPL and must
+	retain the authorship, copyright and license notice.  This file is not
+	a complete program and may only be used when the entire operating
+	system is licensed under the GPL.
+
+	The author may be reached as becker@scyld.com, or C/O
+	Scyld Computing Corporation
+	410 Severn Ave., Suite 210
+	Annapolis MD 21403
 
-	Maintained by Jeff Garzik <jgarzik@mandrakesoft.com>
-
-	Original code came from 8139too.c, which in turns was based
-	originally on Donald Becker's rtl8139.c driver, versions 1.11
-	and older.  This driver was originally based on rtl8139.c
-	version 1.07.  Header of rtl8139.c version 1.11:
-
-	-----<snip>-----
-
-        	Written 1997-2000 by Donald Becker.
-		This software may be used and distributed according to the
-		terms of the GNU General Public License (GPL), incorporated
-		herein by reference.  Drivers based on or derived from this
-		code fall under the GPL and must retain the authorship,
-		copyright and license notice.  This file is not a complete
-		program and may only be used when the entire operating
-		system is licensed under the GPL.
-
-		This driver is for boards based on the RTL8129 and RTL8139
-		PCI ethernet chips.
-
-		The author may be reached as becker@scyld.com, or C/O Scyld
-		Computing Corporation 410 Severn Ave., Suite 210 Annapolis
-		MD 21403
-
-		Support and updates available at
-		http://www.scyld.com/network/rtl8139.html
-
-		Twister-tuning table provided by Kinston
-		<shangh@realtek.com.tw>.
-
-	-----<snip>-----
-
-	This software may be used and distributed according to the terms
-	of the GNU General Public License, incorporated herein by reference.
-
-
------------------------------------------------------------------------------
-
-				Theory of Operation
-
-I. Board Compatibility
-
-This device driver is designed for the RealTek RTL8139 series, the RealTek
-Fast Ethernet controllers for PCI and CardBus.  This chip is used on many
-low-end boards, sometimes with its markings changed.
+	Support information and updates available at
+	http://www.scyld.com/network/pci-skeleton.html
+*/
 
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"pci-skeleton.c:v2.12 11/17/2002 Written by Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+"  http://www.scyld.com/network/drivers.html\n";
 
-II. Board-specific settings
+/* The user-configurable values.
+   These may be modified when a driver module is loaded.*/
 
-PCI bus devices are configured by the system at boot time, so no jumpers
-need to be set on the board.  The system BIOS will assign the
-PCI INTA signal to a (preferably otherwise unused) system IRQ line.
+/* Message enable level: 0..31 = no..all messages.  See NETIF_MSG docs. */
+static int debug = 2;
 
-III. Driver operation
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
 
-IIIa. Rx Ring buffers
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+   Typical is a 64 element hash table based on the Ethernet CRC.  */
+static int multicast_filter_limit = 32;
 
-The receive unit uses a single linear ring buffer rather than the more
-common (and more efficient) descriptor-based architecture.  Incoming frames
-are sequentially stored into the Rx region, and the host copies them into
-skbuffs.
-
-Comment: While it is theoretically possible to process many frames in place,
-any delay in Rx processing would cause us to drop frames.  More importantly,
-the Linux protocol stack is not designed to operate in this manner.
-
-IIIb. Tx operation
-
-The RTL8139 uses a fixed set of four Tx descriptors in register space.
-In a stunningly bad design choice, Tx frames must be 32 bit aligned.  Linux
-aligns the IP header on word boundaries, and 14 byte ethernet header means
-that almost all frames will need to be copied to an alignment buffer.
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+   Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak = 0;
+
+/* Used to pass the media type, etc.
+   Both 'options[]' and 'full_duplex[]' should exist for driver
+   interoperability, however setting full_duplex[] is deprecated.
+   The media type is usually passed in 'options[]'.
+    The default is autonegotiation for speed and duplex.
+	This should rarely be overridden.
+    Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
+    Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
+    Use option values 0x20 and 0x200 for forcing full duplex operation.
+*/
+#define MAX_UNITS 8		/* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+   Making the Tx ring too large decreases the effectiveness of channel
+   bonding and packet priority.
+   There are no ill effects from too-large receive rings. */
+#define TX_RING_SIZE	16
+#define TX_QUEUE_LEN	10		/* Limit ring entries actually used.  */
+#define RX_RING_SIZE	32
 
-IVb. References
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT  (6*HZ)
 
-http://www.realtek.com.tw/cn/cn.html
-http://www.scyld.com/expert/NWay.html
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+   Do not change this value without good reason.  This is not a limit,
+   but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ		1536
 
-IVc. Errata
+/* Set iff a MII transceiver on any interface requires mdio preamble.
+   This is only set with older transceivers, so the extra
+   code size of a per-interface flag is not worthwhile. */
+static char mii_preamble_required = 0;
 
-*/
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning  You must compile this file with the correct options!
+#warning  See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
 
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
 #include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
 #include <linux/module.h>
+
 #include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
 #include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+/* Bogus change in the middle of a "stable" kernel series.
+   In 2.4.7+ slab must come before interrupt.h to avoid mystery breakage. */
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
-#include <linux/delay.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/crc32.h>
+#include <linux/skbuff.h>
+#include <asm/processor.h>		/* Processor type for cache alignment. */
+#include <asm/bitops.h>
 #include <asm/io.h>
 
-#define NETDRV_VERSION		"1.0.0"
-#define MODNAME			"netdrv"
-#define NETDRV_DRIVER_LOAD_MSG	"MyVendor Fast Ethernet driver " NETDRV_VERSION " loaded"
-#define PFX			MODNAME ": "
-
-static char version[] __devinitdata =
-KERN_INFO NETDRV_DRIVER_LOAD_MSG "\n"
-KERN_INFO "  Support available from http://foo.com/bar/baz.html\n";
-
-/* define to 1 to enable PIO instead of MMIO */
-#undef USE_IO_OPS
-
-/* define to 1 to enable copious debugging info */
-#undef NETDRV_DEBUG
-
-/* define to 1 to disable lightweight runtime debugging checks */
-#undef NETDRV_NDEBUG
-
-
-#ifdef NETDRV_DEBUG
-/* note: prints function name for you */
-#  define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args)
-#else
-#  define DPRINTK(fmt, args...)
+#if LINUX_VERSION_CODE >= 0x20300
+#include <linux/spinlock.h>
+#elif LINUX_VERSION_CODE >= 0x20200
+#include <asm/spinlock.h>
 #endif
 
-#ifdef NETDRV_NDEBUG
-#  define assert(expr) do {} while (0)
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
 #else
-#  define assert(expr) \
-        if(!(expr)) {					\
-        printk( "Assertion failed! %s,%s,%s,line=%d\n",	\
-        #expr,__FILE__,__FUNCTION__,__LINE__);		\
-        }
+#include "pci-scan.h"
+#include "kern_compat.h"
 #endif
 
+/* Condensed operations for readability. */
+#define virt_to_le32desc(addr)  cpu_to_le32(virt_to_bus(addr))
+#define le32desc_to_virt(addr)  bus_to_virt(le32_to_cpu(addr))
 
-/* A few user-configurable values. */
-/* media options */
-static int media[] = {-1, -1, -1, -1, -1, -1, -1, -1};
-
-/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
-static int max_interrupt_work = 20;
-
-/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
-   The RTL chips use a 64 element hash table based on the Ethernet CRC.  */
-static int multicast_filter_limit = 32;
-
-/* Size of the in-memory receive ring. */
-#define RX_BUF_LEN_IDX	2	/* 0==8K, 1==16K, 2==32K, 3==64K */
-#define RX_BUF_LEN (8192 << RX_BUF_LEN_IDX)
-#define RX_BUF_PAD 16
-#define RX_BUF_WRAP_PAD 2048 /* spare padding to handle lack of packet wrap */
-#define RX_BUF_TOT_LEN (RX_BUF_LEN + RX_BUF_PAD + RX_BUF_WRAP_PAD)
-
-/* Number of Tx descriptor registers. */
-#define NUM_TX_DESC	4
-
-/* max supported ethernet frame size -- must be at least (dev->mtu+14+4).*/
-#define MAX_ETH_FRAME_SIZE	1536
-
-/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
-#define TX_BUF_SIZE	MAX_ETH_FRAME_SIZE
-#define TX_BUF_TOT_LEN	(TX_BUF_SIZE * NUM_TX_DESC)
-
-/* PCI Tuning Parameters
-   Threshold is bytes transferred to chip before transmission starts. */
-#define TX_FIFO_THRESH 256	/* In bytes, rounded down to 32 byte units. */
-
-/* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
-#define RX_FIFO_THRESH	6	/* Rx buffer level before first PCI xfer.  */
-#define RX_DMA_BURST	6	/* Maximum PCI burst, '6' is 1024 */
-#define TX_DMA_BURST	6	/* Maximum PCI burst, '6' is 1024 */
-
-
-/* Operational parameters that usually are not changed. */
-/* Time in jiffies before concluding the transmitter is hung. */
-#define TX_TIMEOUT  (6*HZ)
-
-
-enum {
-	HAS_CHIP_XCVR = 0x020000,
-	HAS_LNK_CHNG = 0x040000,
-};
-
-#define NETDRV_MIN_IO_SIZE 0x80
-#define RTL8139B_IO_SIZE 256
-
-#define NETDRV_CAPS	HAS_CHIP_XCVR|HAS_LNK_CHNG
-
-typedef enum {
-	RTL8139 = 0,
-	NETDRV_CB,
-	SMC1211TX,
-	/*MPX5030,*/
-	DELTA8139,
-	ADDTRON8139,
-} board_t;
-
-
-/* indexed by board_t, above */
-static struct {
-	const char *name;
-} board_info[] __devinitdata = {
-	{ "RealTek RTL8139 Fast Ethernet" },
-	{ "RealTek RTL8139B PCI/CardBus" },
-	{ "SMC1211TX EZCard 10/100 (RealTek RTL8139)" },
-/*	{ MPX5030, "Accton MPX5030 (RealTek RTL8139)" },*/
-	{ "Delta Electronics 8139 10/100BaseTX" },
-	{ "Addtron Technolgy 8139 10/100BaseTX" },
-};
-
-
-static struct pci_device_id netdrv_pci_tbl[] __devinitdata = {
-	{0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
-	{0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NETDRV_CB },
-	{0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMC1211TX },
-/*	{0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MPX5030 },*/
-	{0x1500, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DELTA8139 },
-	{0x4033, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ADDTRON8139 },
-	{0,}
-};
-MODULE_DEVICE_TABLE (pci, netdrv_pci_tbl);
-
-
-/* The rest of these values should never change. */
-
-/* Symbolic offsets to registers. */
-enum NETDRV_registers {
-	MAC0 = 0,		/* Ethernet hardware address. */
-	MAR0 = 8,		/* Multicast filter. */
-	TxStatus0 = 0x10,	/* Transmit status (Four 32bit registers). */
-	TxAddr0 = 0x20,		/* Tx descriptors (also four 32bit). */
-	RxBuf = 0x30,
-	RxEarlyCnt = 0x34,
-	RxEarlyStatus = 0x36,
-	ChipCmd = 0x37,
-	RxBufPtr = 0x38,
-	RxBufAddr = 0x3A,
-	IntrMask = 0x3C,
-	IntrStatus = 0x3E,
-	TxConfig = 0x40,
-	ChipVersion = 0x43,
-	RxConfig = 0x44,
-	Timer = 0x48,		/* A general-purpose counter. */
-	RxMissed = 0x4C,	/* 24 bits valid, write clears. */
-	Cfg9346 = 0x50,
-	Config0 = 0x51,
-	Config1 = 0x52,
-	FlashReg = 0x54,
-	MediaStatus = 0x58,
-	Config3 = 0x59,
-	Config4 = 0x5A,		/* absent on RTL-8139A */
-	HltClk = 0x5B,
-	MultiIntr = 0x5C,
-	TxSummary = 0x60,
-	BasicModeCtrl = 0x62,
-	BasicModeStatus = 0x64,
-	NWayAdvert = 0x66,
-	NWayLPAR = 0x68,
-	NWayExpansion = 0x6A,
-	/* Undocumented registers, but required for proper operation. */
-	FIFOTMS = 0x70,		/* FIFO Control and test. */
-	CSCR = 0x74,		/* Chip Status and Configuration Register. */
-	PARA78 = 0x78,
-	PARA7c = 0x7c,		/* Magic transceiver parameter register. */
-	Config5 = 0xD8,		/* absent on RTL-8139A */
-};
+#if (LINUX_VERSION_CODE >= 0x20100)  &&  defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
 
-enum ClearBitMasks {
-	MultiIntrClear = 0xF000,
-	ChipCmdClear = 0xE2,
-	Config1Clear = (1<<7)|(1<<6)|(1<<3)|(1<<2)|(1<<1),
-};
+/* Kernels before 2.1.0 cannot map the high addrs assigned by some BIOSes. */
+#if (LINUX_VERSION_CODE < 0x20100)  ||  ! defined(MODULE)
+#define USE_IO_OPS
+#endif
 
-enum ChipCmdBits {
-	CmdReset = 0x10,
-	CmdRxEnb = 0x08,
-	CmdTxEnb = 0x04,
-	RxBufEmpty = 0x01,
-};
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("PCI network skeleton Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(debug, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM_DESC(debug, "Driver message level (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(max_interrupt_work,
+				 "Driver maximum events handled per interrupt");
+MODULE_PARM_DESC(full_duplex,
+				 "Non-zero to set forced full duplex (depricated).");
+MODULE_PARM_DESC(rx_copybreak,
+				 "Breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+				 "Multicast addresses before switching to Rx-all-multicast");
 
-/* Interrupt register bits, using my own meaningful names. */
-enum IntrStatusBits {
-	PCIErr = 0x8000,
-	PCSTimeout = 0x4000,
-	RxFIFOOver = 0x40,
-	RxUnderrun = 0x20,
-	RxOverflow = 0x10,
-	TxErr = 0x08,
-	TxOK = 0x04,
-	RxErr = 0x02,
-	RxOK = 0x01,
-};
-enum TxStatusBits {
-	TxHostOwns = 0x2000,
-	TxUnderrun = 0x4000,
-	TxStatOK = 0x8000,
-	TxOutOfWindow = 0x20000000,
-	TxAborted = 0x40000000,
-	TxCarrierLost = 0x80000000,
-};
-enum RxStatusBits {
-	RxMulticast = 0x8000,
-	RxPhysical = 0x4000,
-	RxBroadcast = 0x2000,
-	RxBadSymbol = 0x0020,
-	RxRunt = 0x0010,
-	RxTooLong = 0x0008,
-	RxCRCErr = 0x0004,
-	RxBadAlign = 0x0002,
-	RxStatusOK = 0x0001,
-};
+/*
+				Theory of Operation
 
-/* Bits in RxConfig. */
-enum rx_mode_bits {
-	AcceptErr = 0x20,
-	AcceptRunt = 0x10,
-	AcceptBroadcast = 0x08,
-	AcceptMulticast = 0x04,
-	AcceptMyPhys = 0x02,
-	AcceptAllPhys = 0x01,
-};
+I. Board Compatibility
 
-/* Bits in TxConfig. */
-enum tx_config_bits {
-	TxIFG1 = (1 << 25),	/* Interframe Gap Time */
-	TxIFG0 = (1 << 24),	/* Enabling these bits violates IEEE 802.3 */
-	TxLoopBack = (1 << 18) | (1 << 17), /* enable loopback test mode */
-	TxCRC = (1 << 16),	/* DISABLE appending CRC to end of Tx packets */
-	TxClearAbt = (1 << 0),	/* Clear abort (WO) */
-	TxDMAShift = 8,		/* DMA burst value (0-7) is shift this many bits */
+State the chips and boards this driver is known to work with.
+Note any similar chips or boards that will not work.
 
-	TxVersionMask = 0x7C800000, /* mask out version bits 30-26, 23 */
-};
+This driver skeleton demonstrates the driver for an idealized
+descriptor-based bus-master PCI chip.
 
-/* Bits in Config1 */
-enum Config1Bits {
-	Cfg1_PM_Enable = 0x01,
-	Cfg1_VPD_Enable = 0x02,
-	Cfg1_PIO = 0x04,
-	Cfg1_MMIO = 0x08,
-	Cfg1_LWAKE = 0x10,
-	Cfg1_Driver_Load = 0x20,
-	Cfg1_LED0 = 0x40,
-	Cfg1_LED1 = 0x80,
-};
+II. Board-specific settings
 
-enum RxConfigBits {
-	/* Early Rx threshold, none or X/16 */
-	RxCfgEarlyRxNone = 0,
-	RxCfgEarlyRxShift = 24,
-
-	/* rx fifo threshold */
-	RxCfgFIFOShift = 13,
-	RxCfgFIFONone = (7 << RxCfgFIFOShift),
-
-	/* Max DMA burst */
-	RxCfgDMAShift = 8,
-	RxCfgDMAUnlimited = (7 << RxCfgDMAShift),
-
-	/* rx ring buffer length */
-	RxCfgRcv8K = 0,
-	RxCfgRcv16K = (1 << 11),
-	RxCfgRcv32K = (1 << 12),
-	RxCfgRcv64K = (1 << 11) | (1 << 12),
+No jumpers exist on most PCI boards, so this section is usually empty.
 
-	/* Disable packet wrap at end of Rx buffer */
-	RxNoWrap = (1 << 7),
-};
+III. Driver operation
 
+IIIa. Ring buffers
 
-/* Twister tuning parameters from RealTek.
-   Completely undocumented, but required to tune bad links. */
-enum CSCRBits {
-	CSCR_LinkOKBit = 0x0400,
-	CSCR_LinkChangeBit = 0x0800,
-	CSCR_LinkStatusBits = 0x0f000,
-	CSCR_LinkDownOffCmd = 0x003c0,
-	CSCR_LinkDownCmd = 0x0f3c0,
-};
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
+Some chips explicitly use only 2^N sized rings, while others use a
+'next descriptor' pointer that the driver forms into rings.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver uses a zero-copy receive and transmit scheme.
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack.  Buffers consumed this way are replaced by newly allocated
+skbuffs in a later phase of receives.
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames.  New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets.  When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine.  Copying also preloads the cache, which is
+most useful with small frames.
+
+A subtle aspect of the operation is that the IP header at offset 14 in an
+ethernet frame isn't longword aligned for further processing.
+When unaligned buffers are permitted by the hardware (and always on copies)
+frames are put into the skbuff at an offset of "+2", 16-byte aligning
+the IP header.
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control.  One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag.  The other thread is the interrupt handler, which is single
+threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished otherwise it sets
+the 'lp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IIId. SMP semantics
+
+The following are serialized with respect to each other via the "xmit_lock".
+  dev->hard_start_xmit()	Transmit a packet
+  dev->tx_timeout()			Transmit watchdog for stuck Tx
+  dev->set_multicast_list()	Set the receive filter.
+Note: The Tx timeout watchdog code is implemented by the timer routine in
+kernels up to 2.2.*.  In 2.4.* and later the timeout code is part of the
+driver interface.
+
+The following fall under the global kernel lock.  The module will not be
+unloaded during the call, unless a call with a potential reschedule e.g.
+kmalloc() is called.  No other synchronization assertion is made.
+  dev->open()
+  dev->do_ioctl()
+  dev->get_stats()
+Caution: The lock for dev->open() is commonly broken with request_irq() or
+kmalloc().  It is best to avoid any lock-breaking call in do_ioctl() and
+get_stats(), or additional module locking code must be implemented.
+
+The following is self-serialized (no simultaneous entry)
+  A handler registered with request_irq().
+
+IV. Notes
+
+There are few hard rules about writing device drivers, but I have read some
+amazingly unwise code.  Bad code often stems from the mistaken belief that
+this device driver is the most important code the machine is running.
+
+Remember that this is a real OS, not DOS.  Never mess with system hardware
+(the timer chip, DMA channels, IRQ mapping): use the hardware-independent
+kernel services instead.
+
+While there is a udelay() function, use it sparingly and only with tiny
+delays.  It is not for having the kernel wait three seconds while
+autonegotiation completes!  At boot time or module insertion time this rule
+can be relaxed somewhat, but even then the total delay should be under a
+timer tick (10msec).
+
+All loops should be checked with a 'boguscnt' limit.  That includes the
+interrupt handler, which should limit the work it does with a tunable
+parameter.  Loops that check for hardware completion should have a typical
+completion count in a comment.  An exception is traversing software
+maintained lists, most of which should be designed to grow arbitrarily long.
+
+The device driver source code file should be self-contained, and as compact
+as readability permits.  It should not be spread out over multiple source
+files, and there should only be a driver.h file in special circumstances.
 
+Finally, always support multiple devices.  That means few, if any, global
+variables.  All driver variables should be 'static'.
 
-enum Cfg9346Bits {
-	Cfg9346_Lock = 0x00,
-	Cfg9346_Unlock = 0xC0,
-};
+IVb. References
 
+http://www.scyld.com/expert/100mbps.html
+http://scyld.com/expert/NWay.html
 
-#define PARA78_default	0x78fa8388
-#define PARA7c_default	0xcb38de43	/* param[0][3] */
-#define PARA7c_xxx		0xcb38de43
-static const unsigned long param[4][4] = {
-	{0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
-	{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
-	{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
-	{0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
-};
+List the documentation used to write the driver.  Note any proprietary or
+trade secret information, and the agreement you have to release the same.
 
-struct ring_info {
-	struct sk_buff *skb;
-	dma_addr_t mapping;
-};
+IVc. Errata
 
+Note any known bugs or limitations.
+*/
 
-typedef enum {
-	CH_8139 = 0,
-	CH_8139_K,
-	CH_8139A,
-	CH_8139B,
-	CH_8130,
-	CH_8139C,
-} chip_t;
-
-
-/* directly indexed by chip_t, above */
-const static struct {
-	const char *name;
-	u8 version; /* from RTL8139C docs */
-	u32 RxConfigMask; /* should clear the bits supported by this chip */
-} rtl_chip_info[] = {
-	{ "RTL-8139",
-	  0x40,
-	  0xf0fe0040, /* XXX copied from RTL8139A, verify */
-	},
-
-	{ "RTL-8139 rev K",
-	  0x60,
-	  0xf0fe0040,
-	},
-
-	{ "RTL-8139A",
-	  0x70,
-	  0xf0fe0040,
-	},
-
-	{ "RTL-8139B",
-	  0x78,
-	  0xf0fc0040
-	},
-
-	{ "RTL-8130",
-	  0x7C,
-	  0xf0fe0040, /* XXX copied from RTL8139A, verify */
-	},
-
-	{ "RTL-8139C",
-	  0x74,
-	  0xf0fc0040, /* XXX copied from RTL8139B, verify */
-	},
+
 
-};
+/* This table drives the PCI probe routines.
+   Note the matching code -- the first table entry matches only the 5678 card,
+   the second all remaining 56** cards.
+*/
 
+static void *netfin_probe1(struct pci_dev *pdev, void *init_dev,
+						   long ioaddr, int irq, int chip_idx, int find_cnt);
+static int netdev_pwr_event(void *dev_instance, int event);
+enum chip_capability_flags {CanHaveMII=1, };
+#ifdef USE_IO_OPS
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO  | PCI_ADDR0)
+#else
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
+#endif
 
-struct netdrv_private {
-	board_t board;
-	void *mmio_addr;
-	int drv_flags;
-	struct pci_dev *pci_dev;
-	struct net_device_stats stats;
-	struct timer_list timer;	/* Media selection timer. */
-	unsigned char *rx_ring;
-	unsigned int cur_rx;	/* Index into the Rx buffer of next Rx pkt. */
-	unsigned int tx_flag;
-	atomic_t cur_tx;
-	atomic_t dirty_tx;
-	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
-	struct ring_info tx_info[NUM_TX_DESC];
-	unsigned char *tx_buf[NUM_TX_DESC];	/* Tx bounce buffers */
-	unsigned char *tx_bufs;	/* Tx bounce buffer region. */
-	dma_addr_t rx_ring_dma;
-	dma_addr_t tx_bufs_dma;
-	char phys[4];		/* MII device addresses. */
-	char twistie, twist_row, twist_col;	/* Twister tune state. */
-	unsigned int full_duplex:1;	/* Full-duplex operation requested. */
-	unsigned int duplex_lock:1;
-	unsigned int default_port:4;	/* Last dev->if_port value. */
-	unsigned int media2:4;	/* Secondary monitored media port. */
-	unsigned int medialock:1;	/* Don't sense media type. */
-	unsigned int mediasense:1;	/* Media sensing in progress. */
-	spinlock_t lock;
-	chip_t chipset;
+static struct pci_id_info pci_id_tbl[] = {
+	{"NetTechCom 5678 adapter", {0x56781234, 0xffffffff, },
+	 PCI_IOTYPE, 128, CanHaveMII},
+	{"NetTechCom 5600 series",  {0x56001234, 0xff00ffff, },
+	 PCI_IOTYPE, 128, CanHaveMII},
+	{0,},						/* 0 terminated list. */
 };
 
-MODULE_AUTHOR ("Jeff Garzik <jgarzik@mandrakesoft.com>");
-MODULE_DESCRIPTION ("Skeleton for a PCI Fast Ethernet driver");
-MODULE_LICENSE("GPL");
-MODULE_PARM (multicast_filter_limit, "i");
-MODULE_PARM (max_interrupt_work, "i");
-MODULE_PARM (debug, "i");
-MODULE_PARM (media, "1-" __MODULE_STRING(8) "i");
-MODULE_PARM_DESC (multicast_filter_limit, "pci-skeleton maximum number of filtered multicast addresses");
-MODULE_PARM_DESC (max_interrupt_work, "pci-skeleton maximum events handled per interrupt");
-MODULE_PARM_DESC (media, "pci-skeleton: Bits 0-3: media type, bit 17: full duplex");
-MODULE_PARM_DESC (debug, "(unused)");
-
-static int read_eeprom (void *ioaddr, int location, int addr_len);
-static int netdrv_open (struct net_device *dev);
-static int mdio_read (struct net_device *dev, int phy_id, int location);
-static void mdio_write (struct net_device *dev, int phy_id, int location,
-			int val);
-static void netdrv_timer (unsigned long data);
-static void netdrv_tx_timeout (struct net_device *dev);
-static void netdrv_init_ring (struct net_device *dev);
-static int netdrv_start_xmit (struct sk_buff *skb,
-			       struct net_device *dev);
-static void netdrv_interrupt (int irq, void *dev_instance,
-			       struct pt_regs *regs);
-static int netdrv_close (struct net_device *dev);
-static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
-static struct net_device_stats *netdrv_get_stats (struct net_device *dev);
-static void netdrv_set_rx_mode (struct net_device *dev);
-static void netdrv_hw_start (struct net_device *dev);
-
+struct drv_id_info netfin_drv_id = {
+	"netfin", 0, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl, netfin_probe1,
+	netdev_pwr_event };
 
+/* This driver was written to use PCI memory space, however x86-oriented
+   hardware sometimes works only with I/O space accesses. */
 #ifdef USE_IO_OPS
-
-#define NETDRV_R8(reg)		inb (((unsigned long)ioaddr) + (reg))
-#define NETDRV_R16(reg)		inw (((unsigned long)ioaddr) + (reg))
-#define NETDRV_R32(reg)		((unsigned long) inl (((unsigned long)ioaddr) + (reg)))
-#define NETDRV_W8(reg, val8)	outb ((val8), ((unsigned long)ioaddr) + (reg))
-#define NETDRV_W16(reg, val16)	outw ((val16), ((unsigned long)ioaddr) + (reg))
-#define NETDRV_W32(reg, val32)	outl ((val32), ((unsigned long)ioaddr) + (reg))
-#define NETDRV_W8_F		NETDRV_W8
-#define NETDRV_W16_F		NETDRV_W16
-#define NETDRV_W32_F		NETDRV_W32
 #undef readb
 #undef readw
 #undef readl
 #undef writeb
 #undef writew
 #undef writel
-#define readb(addr) inb((unsigned long)(addr))
-#define readw(addr) inw((unsigned long)(addr))
-#define readl(addr) inl((unsigned long)(addr))
-#define writeb(val,addr) outb((val),(unsigned long)(addr))
-#define writew(val,addr) outw((val),(unsigned long)(addr))
-#define writel(val,addr) outl((val),(unsigned long)(addr))
-
-#else
-
-/* write MMIO register, with flush */
-/* Flush avoids rtl8139 bug w/ posted MMIO writes */
-#define NETDRV_W8_F(reg, val8)	do { writeb ((val8), ioaddr + (reg)); readb (ioaddr + (reg)); } while (0)
-#define NETDRV_W16_F(reg, val16)	do { writew ((val16), ioaddr + (reg)); readw (ioaddr + (reg)); } while (0)
-#define NETDRV_W32_F(reg, val32)	do { writel ((val32), ioaddr + (reg)); readl (ioaddr + (reg)); } while (0)
-
-
-#if MMIO_FLUSH_AUDIT_COMPLETE
-
-/* write MMIO register */
-#define NETDRV_W8(reg, val8)	writeb ((val8), ioaddr + (reg))
-#define NETDRV_W16(reg, val16)	writew ((val16), ioaddr + (reg))
-#define NETDRV_W32(reg, val32)	writel ((val32), ioaddr + (reg))
-
-#else
-
-/* write MMIO register, then flush */
-#define NETDRV_W8		NETDRV_W8_F
-#define NETDRV_W16		NETDRV_W16_F
-#define NETDRV_W32		NETDRV_W32_F
-
-#endif /* MMIO_FLUSH_AUDIT_COMPLETE */
-
-/* read MMIO register */
-#define NETDRV_R8(reg)		readb (ioaddr + (reg))
-#define NETDRV_R16(reg)		readw (ioaddr + (reg))
-#define NETDRV_R32(reg)		((unsigned long) readl (ioaddr + (reg)))
+#define readb inb
+#define readw inw
+#define readl inl
+#define writeb outb
+#define writew outw
+#define writel outl
+#endif
 
-#endif /* USE_IO_OPS */
+/* Offsets to the device registers.
+   Unlike software-only systems, device drivers interact with complex hardware.
+   It's not useful to define symbolic names for every register bit in the
+   device.  The name can only partially document the semantics and make
+   the driver longer and more difficult to read.
+   In general, only the important configuration values or bits changed
+   multiple times should be defined symbolically.
+*/
+enum register_offsets {
+	ChipCmd=0x00, IntrStatus=0x04, IntrEnable=0x08,
+	TxStatus=0x10, TxCmd=0x14, TxRingPtr=0x18,
+	RxStatus=0x20, RxCmd=0x24, RxRingPtr=0x28,
+	EECtrl=0x40, MIICtrl=0x44, LEDCtrl=0x48,
+	StationAddr=0x50, RxMode=0x58, TxMode=0x5C,
+	RxMissed=0x60, RxCRCErrs=0x64, MulticastFilter0=0x68,MulticastFilter1=0x6C,
+	PCIBusCfg=0x70, FIFOCfg=0x74, ChipReset=0x78,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+	IntrRxDone=0x01, IntrRxEmpty=0x02, IntrRxPCIErr=0x04,
+	IntrTxDone=0x10, IntrTxEmpty=0x20, IntrTxPCIErr=0x40,
+	StatsMax=0x0100, LinkChange=0x0200, TxUnderrun=0x0400, RxOverflow=0x0800,
+	IntrNormalSummary=0x8000,	IntrAbnormalSummary=0x4000,
+};
 
+/* Bits in the RxMode register. */
+enum rx_mode_bits {
+	AcceptErr=0x20, AcceptRunt=0x10,
+	AcceptBroadcast=0x08, AcceptMulticast=0x04,
+	AcceptAllPhys=0x02, AcceptMyPhys=0x01,
+};
+
+/* Misc. bits.  Symbolic names so that may be searched for. */
+enum misc_bits {
+	ChipResetCmd=1, RxEnable=1, RxPoll=2, RxDisable=4,
+	TxEnable=1, TxPoll=2, TxDisable=4,
+	TxModeFDX=1, TxThresholdField=0x0ff0, TxThresholdInc=0x0010,
+};
+
+/* The Rx and Tx buffer descriptors. */
+/* Note that using only 32 bit fields simplifies conversion to big-endian
+   architectures. */
+struct netdev_desc {
+	u32 status;
+	u32 length;
+	u32 addr;
+	u32 next_desc;
+};
+
+/* Bits in network_desc.status */
+enum desc_status_bits {
+	DescOwn=0x80000000, DescEndPacket=0x40000000, DescEndRing=0x20000000,
+	DescIntr=0x10000000,
+	RxDescWholePkt=0x60000000,
+	RxDescErrSum=0x80, RxErrRunt=0x40, RxErrLong=0x20, RxErrFrame=0x10,
+	RxErrCRC=0x08, RxErrCode=0x04,
+	TxErrAbort=0x2000, TxErrCarrier=0x1000, TxErrLate=0x0800,
+	TxErr16Colls=0x0400, TxErrDefer=0x0200, TxErrHeartbeat=0x0100,
+	TxColls=0x00ff,
+};
+
+#define PRIV_ALIGN	15 	/* Required alignment mask */
+/* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
+   within the structure. */
+struct netdev_private {
+	/* Descriptor rings first for alignment. */
+	struct netdev_desc rx_ring[RX_RING_SIZE];
+	struct netdev_desc tx_ring[TX_RING_SIZE];
+	struct net_device *next_module;		/* Link for devices of this type. */
+	void *priv_addr;					/* Unaligned address for kfree */
+	const char *product_name;
+	/* The addresses of receive-in-place skbuffs. */
+	struct sk_buff* rx_skbuff[RX_RING_SIZE];
+	/* The saved address of a sent-in-place packet/buffer, for later free(). */
+	struct sk_buff* tx_skbuff[TX_RING_SIZE];
+	struct net_device_stats stats;
+	struct timer_list timer;	/* Media monitoring timer. */
+	/* Frequently used values: keep some adjacent for cache effect. */
+	int msg_level;
+	int chip_id, drv_flags;
+	struct pci_dev *pci_dev;
+	long in_interrupt;			/* Word-long for SMP locks. */
+	int max_interrupt_work;
+	int intr_enable;
+	unsigned int restore_intr_enable:1;	/* Set if temporarily masked.  */
+
+	struct netdev_desc *rx_head_desc;
+	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
+	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
+	int rx_copybreak;
+
+	unsigned int cur_tx, dirty_tx;
+	unsigned int tx_config;
+	unsigned int tx_full:1;				/* The Tx queue is full. */
 
-static const u16 netdrv_intr_mask =
-	PCIErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver |
-	TxErr | TxOK | RxErr | RxOK;
+	/* These values keep track of the transceiver/media in use. */
+	unsigned int full_duplex:1;			/* Full-duplex operation requested. */
+	unsigned int duplex_lock:1;
+	unsigned int medialock:1;			/* Do not sense media. */
+	unsigned int default_port:4;		/* Last dev->if_port value. */
+	/* Rx filter. */
+	u32 cur_rx_mode;
+	u32 rx_filter[2];
+	int multicast_filter_limit;
+
+	/* MII transceiver section. */
+	int mii_cnt;						/* MII device addresses. */
+	u16 advertising;					/* NWay media advertisement */
+	unsigned char phys[2];				/* MII device addresses. */
+};
+
+/* The station address location in the EEPROM. */
+#define EEPROM_SA_OFFSET	0x10
+
+static int  eeprom_read(long ioaddr, int location);
+static int  mdio_read(struct net_device *dev, int phy_id,
+					  unsigned int location);
+static void mdio_write(struct net_device *dev, int phy_id,
+					   unsigned int location, int value);
+static int  netdev_open(struct net_device *dev);
+static int  change_mtu(struct net_device *dev, int new_mtu);
+static void check_duplex(struct net_device *dev);
+static void netdev_timer(unsigned long data);
+static void tx_timeout(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static int  start_tx(struct sk_buff *skb, struct net_device *dev);
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static void netdev_error(struct net_device *dev, int intr_status);
+static int  netdev_rx(struct net_device *dev);
+static void netdev_error(struct net_device *dev, int intr_status);
+static void set_rx_mode(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int  netdev_close(struct net_device *dev);
 
-static const unsigned int netdrv_rx_config =
-	  RxCfgEarlyRxNone | RxCfgRcv32K | RxNoWrap |
-	  (RX_FIFO_THRESH << RxCfgFIFOShift) |
-	  (RX_DMA_BURST << RxCfgDMAShift);
+
 
+/* A list of our installed devices, for removing the driver module. */
+static struct net_device *root_net_dev = NULL;
 
-static int __devinit netdrv_init_board (struct pci_dev *pdev,
-					 struct net_device **dev_out,
-					 void **ioaddr_out)
+#ifndef MODULE
+/* You *must* rename this! */
+int skel_netdev_probe(struct net_device *dev)
 {
-	void *ioaddr = NULL;
-	struct net_device *dev;
-	struct netdrv_private *tp;
-	u8 tmp8;
-	int rc, i;
-	u32 pio_start, pio_end, pio_flags, pio_len;
-	unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
-	u32 tmp;
-
-	DPRINTK ("ENTER\n");
-
-	assert (pdev != NULL);
-	assert (ioaddr_out != NULL);
-
-	*ioaddr_out = NULL;
-	*dev_out = NULL;
-
-	/* dev zeroed in init_etherdev */
-	dev = alloc_etherdev (sizeof (*tp));
-	if (dev == NULL) {
-		printk (KERN_ERR PFX "unable to alloc new ethernet\n");
-		DPRINTK ("EXIT, returning -ENOMEM\n");
-		return -ENOMEM;
-	}
-	SET_MODULE_OWNER(dev);
-	tp = dev->priv;
-
-	/* enable device (incl. PCI PM wakeup), and bus-mastering */
-	rc = pci_enable_device (pdev);
-	if (rc)
-		goto err_out;
-
-	pio_start = pci_resource_start (pdev, 0);
-	pio_end = pci_resource_end (pdev, 0);
-	pio_flags = pci_resource_flags (pdev, 0);
-	pio_len = pci_resource_len (pdev, 0);
-
-	mmio_start = pci_resource_start (pdev, 1);
-	mmio_end = pci_resource_end (pdev, 1);
-	mmio_flags = pci_resource_flags (pdev, 1);
-	mmio_len = pci_resource_len (pdev, 1);
-
-	/* set this immediately, we need to know before
-	 * we talk to the chip directly */
-	DPRINTK("PIO region size == 0x%02X\n", pio_len);
-	DPRINTK("MMIO region size == 0x%02lX\n", mmio_len);
-
-	/* make sure PCI base addr 0 is PIO */
-	if (!(pio_flags & IORESOURCE_IO)) {
-		printk (KERN_ERR PFX "region #0 not a PIO resource, aborting\n");
-		rc = -ENODEV;
-		goto err_out;
-	}
-
-	/* make sure PCI base addr 1 is MMIO */
-	if (!(mmio_flags & IORESOURCE_MEM)) {
-		printk (KERN_ERR PFX "region #1 not an MMIO resource, aborting\n");
-		rc = -ENODEV;
-		goto err_out;
-	}
-
-	/* check for weird/broken PCI region reporting */
-	if ((pio_len < NETDRV_MIN_IO_SIZE) ||
-	    (mmio_len < NETDRV_MIN_IO_SIZE)) {
-		printk (KERN_ERR PFX "Invalid PCI region size(s), aborting\n");
-		rc = -ENODEV;
-		goto err_out;
-	}
-
-	rc = pci_request_regions (pdev, "pci-skeleton");
-	if (rc)
-		goto err_out;
-
-	pci_set_master (pdev);
-
-#ifdef USE_IO_OPS
-	ioaddr = (void *) pio_start;
-#else
-	/* ioremap MMIO region */
-	ioaddr = ioremap (mmio_start, mmio_len);
-	if (ioaddr == NULL) {
-		printk (KERN_ERR PFX "cannot remap MMIO, aborting\n");
-		rc = -EIO;
-		goto err_out_free_res;
-	}
-#endif /* USE_IO_OPS */
-
-	/* Soft reset the chip. */
-	NETDRV_W8 (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear) | CmdReset);
-
-	/* Check that the chip has finished the reset. */
-	for (i = 1000; i > 0; i--)
-		if ((NETDRV_R8 (ChipCmd) & CmdReset) == 0)
-			break;
-		else
-			udelay (10);
-
-	/* Bring the chip out of low-power mode. */
-	/* <insert device-specific code here> */
-
-#ifndef USE_IO_OPS
-	/* sanity checks -- ensure PIO and MMIO registers agree */
-	assert (inb (pio_start+Config0) == readb (ioaddr+Config0));
-	assert (inb (pio_start+Config1) == readb (ioaddr+Config1));
-	assert (inb (pio_start+TxConfig) == readb (ioaddr+TxConfig));
-	assert (inb (pio_start+RxConfig) == readb (ioaddr+RxConfig));
-#endif /* !USE_IO_OPS */
-
-	/* identify chip attached to board */
-	tmp = NETDRV_R8 (ChipVersion);
-	for (i = ARRAY_SIZE (rtl_chip_info) - 1; i >= 0; i--)
-		if (tmp == rtl_chip_info[i].version) {
-			tp->chipset = i;
-			goto match;
-		}
-
-	/* if unknown chip, assume array element #0, original RTL-8139 in this case */
-	printk (KERN_DEBUG PFX "PCI device %s: unknown chip version, assuming RTL-8139\n",
-		pdev->slot_name);
-	printk (KERN_DEBUG PFX "PCI device %s: TxConfig = 0x%lx\n", pdev->slot_name, NETDRV_R32 (TxConfig));
-	tp->chipset = 0;
-
-match:
-	DPRINTK ("chipset id (%d) == index %d, '%s'\n",
-		tmp,
-		tp->chipset,
-		rtl_chip_info[tp->chipset].name);
-
-	i = register_netdev (dev);
-	if (i)
-		goto err_out_unmap;
-
-	DPRINTK ("EXIT, returning 0\n");
-	*ioaddr_out = ioaddr;
-	*dev_out = dev;
+	if (pci_drv_register(&netfin_drv_id, dev) < 0)
+		return -ENODEV;
+	if (debug >= NETIF_MSG_DRV)	/* Emit version even if no cards detected. */
+		printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
 	return 0;
-
-err_out_unmap:
-#ifndef USE_IO_OPS
-	iounmap(ioaddr);
-err_out_free_res:
-#endif
-	pci_release_regions (pdev);
-err_out:
-	kfree (dev);
-	DPRINTK ("EXIT, returning %d\n", rc);
-	return rc;
 }
+#endif
 
-
-static int __devinit netdrv_init_one (struct pci_dev *pdev,
-				       const struct pci_device_id *ent)
+static void *netfin_probe1(struct pci_dev *pdev, void *init_dev,
+						   long ioaddr, int irq, int chip_idx, int card_idx)
 {
-	struct net_device *dev = NULL;
-	struct netdrv_private *tp;
-	int i, addr_len, option;
-	void *ioaddr = NULL;
-	static int board_idx = -1;
-	u8 tmp;
+	struct net_device *dev;
+	struct netdev_private *np;
+	void *priv_mem;
+	int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+
+	dev = init_etherdev(init_dev, 0);
+	if (!dev)
+		return NULL;
+
+	/* Perhaps NETIF_MSG_PROBE */
+	printk(KERN_INFO "%s: %s at 0x%lx, ",
+		   dev->name, pci_id_tbl[chip_idx].name, ioaddr);
 
-/* when built into the kernel, we only print version if device is found */
-#ifndef MODULE
-	static int printed_version;
-	if (!printed_version++)
-		printk(version);
+	for (i = 0; i < 3; i++)
+		((u16 *)dev->dev_addr)[i] =
+			le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
+	if (memcmp(dev->dev_addr, "\0\0\0\0\0", 6) == 0) {
+		printk(" (MISSING EEPROM ADDRESS)");
+		memcpy(dev->dev_addr, "\100Linux", 6);
+	}
+	for (i = 0; i < 5; i++)
+		printk("%2.2x:", dev->dev_addr[i]);
+	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+#if ! defined(final_version) /* Dump the EEPROM contents during development. */
+	if (debug > 4)
+		for (i = 0; i < 0x40; i++)
+			printk("%4.4x%s",
+				   eeprom_read(ioaddr, i), i % 16 != 15 ? " " : "\n");
 #endif
 
-	DPRINTK ("ENTER\n");
-
-	assert (pdev != NULL);
-	assert (ent != NULL);
-
-	board_idx++;
+	/* Make certain elements e.g. descriptor lists are aligned. */
+	priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
+	/* Check for the very unlikely case of no memory. */
+	if (priv_mem == NULL)
+		return NULL;
 
-	i = netdrv_init_board (pdev, &dev, &ioaddr);
-	if (i < 0) {
-		DPRINTK ("EXIT, returning %d\n", i);
-		return i;
-	}
-
-	tp = dev->priv;
-
-	assert (ioaddr != NULL);
-	assert (dev != NULL);
-	assert (tp != NULL);
+	/* Do bogusness checks before this point.
+	   We do a request_region() only to register /proc/ioports info. */
+#ifdef USE_IO_OPS
+	request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);
+#endif
 
-	addr_len = read_eeprom (ioaddr, 0, 8) == 0x8129 ? 8 : 6;
-	for (i = 0; i < 3; i++)
-		((u16 *) (dev->dev_addr))[i] =
-		    le16_to_cpu (read_eeprom (ioaddr, i + 7, addr_len));
+	/* Reset the chip to erase previous misconfiguration. */
+	writel(ChipResetCmd, ioaddr + ChipReset);
 
-	/* The Rtl8139-specific entries in the device structure. */
-	dev->open = netdrv_open;
-	dev->hard_start_xmit = netdrv_start_xmit;
-	dev->stop = netdrv_close;
-	dev->get_stats = netdrv_get_stats;
-	dev->set_multicast_list = netdrv_set_rx_mode;
-	dev->do_ioctl = netdrv_ioctl;
-	dev->tx_timeout = netdrv_tx_timeout;
-	dev->watchdog_timeo = TX_TIMEOUT;
-
-	dev->irq = pdev->irq;
-	dev->base_addr = (unsigned long) ioaddr;
-
-	/* dev->priv/tp zeroed and aligned in init_etherdev */
-	tp = dev->priv;
-
-	/* note: tp->chipset set in netdrv_init_board */
-	tp->drv_flags = PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
-			PCI_COMMAND_MASTER | NETDRV_CAPS;
-	tp->pci_dev = pdev;
-	tp->board = ent->driver_data;
-	tp->mmio_addr = ioaddr;
-	tp->lock = SPIN_LOCK_UNLOCKED;
-
-	pci_set_drvdata(pdev, dev);
-
-	tp->phys[0] = 32;
-
-	printk (KERN_INFO "%s: %s at 0x%lx, "
-		"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
-		"IRQ %d\n",
-		dev->name,
-		board_info[ent->driver_data].name,
-		dev->base_addr,
-		dev->dev_addr[0], dev->dev_addr[1],
-		dev->dev_addr[2], dev->dev_addr[3],
-		dev->dev_addr[4], dev->dev_addr[5],
-		dev->irq);
+	dev->base_addr = ioaddr;
+	dev->irq = irq;
 
-	printk (KERN_DEBUG "%s:  Identified 8139 chip type '%s'\n",
-		dev->name, rtl_chip_info[tp->chipset].name);
+	dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+	memset(np, 0, sizeof(*np));
+	np->priv_addr = priv_mem;
+
+	np->next_module = root_net_dev;
+	root_net_dev = dev;
+
+	np->pci_dev = pdev;
+	np->chip_id = chip_idx;
+	np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
+	np->msg_level = (1 << debug) - 1;
+	np->rx_copybreak = rx_copybreak;
+	np->max_interrupt_work = max_interrupt_work;
+	np->multicast_filter_limit = multicast_filter_limit;
 
-	/* Put the chip into low-power mode. */
-	NETDRV_W8_F (Cfg9346, Cfg9346_Unlock);
+	if (dev->mem_start)
+		option = dev->mem_start;
 
 	/* The lower four bits are the media type. */
-	option = (board_idx > 7) ? 0 : media[board_idx];
 	if (option > 0) {
-		tp->full_duplex = (option & 0x200) ? 1 : 0;
-		tp->default_port = option & 15;
-		if (tp->default_port)
-			tp->medialock = 1;
+		if (option & 0x220)
+			np->full_duplex = 1;
+		np->default_port = option & 15;
+		if (np->default_port)
+			np->medialock = 1;
+	}
+	if (card_idx < MAX_UNITS  &&  full_duplex[card_idx] > 0)
+		np->full_duplex = 1;
+
+	if (np->full_duplex) {
+		if (np->msg_level & NETIF_MSG_PROBE)
+			printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
+				   " disabled.\n", dev->name);
+		np->duplex_lock = 1;
+	}
+
+	/* The chip-specific entries in the device structure. */
+	dev->open = &netdev_open;
+	dev->hard_start_xmit = &start_tx;
+	dev->stop = &netdev_close;
+	dev->get_stats = &get_stats;
+	dev->set_multicast_list = &set_rx_mode;
+	dev->do_ioctl = &mii_ioctl;
+	dev->change_mtu = &change_mtu;
+
+	if (np->drv_flags & CanHaveMII) {
+		int phy, phy_idx = 0;
+		mii_preamble_required++;
+		/* In some cases the search should begin with #0. */
+		for (phy = 1; phy < 32 && phy_idx < 4; phy++) {
+			int mii_status = mdio_read(dev, phy, 1);
+			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
+				np->phys[phy_idx++] = phy;
+				np->advertising = mdio_read(dev, phy, 4);
+				if ((mii_status & 0x0040) == 0)
+					mii_preamble_required++;
+				if (np->msg_level & NETIF_MSG_PROBE)
+					printk(KERN_INFO "%s: MII PHY found at address %d, status "
+						   "0x%4.4x advertising %4.4x.\n",
+						   dev->name, phy, mii_status, np->advertising);
+			}
+		}
+		mii_preamble_required--;
+		np->mii_cnt = phy_idx;
 	}
 
-	if (tp->full_duplex) {
-		printk (KERN_INFO
-			"%s: Media type forced to Full Duplex.\n",
-			dev->name);
-		mdio_write (dev, tp->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
-		tp->duplex_lock = 1;
+	/* Allow forcing the media type. */
+	if (option > 0) {
+		if (option & 0x220)
+			np->full_duplex = 1;
+		np->default_port = option & 0x3ff;
+		if (np->default_port & 0x330) {
+			np->medialock = 1;
+			if (np->msg_level & NETIF_MSG_PROBE)
+				printk(KERN_INFO "  Forcing %dMbs %s-duplex operation.\n",
+					   (option & 0x300 ? 100 : 10),
+					   (np->full_duplex ? "full" : "half"));
+			if (np->mii_cnt)
+				mdio_write(dev, np->phys[0], 0,
+						   ((option & 0x300) ? 0x2000 : 0) | 	/* 100mbps? */
+						   (np->full_duplex ? 0x0100 : 0)); /* Full duplex? */
+		}
 	}
 
-	DPRINTK ("EXIT - returning 0\n");
-	return 0;
-}
-
-
-static void __devexit netdrv_remove_one (struct pci_dev *pdev)
-{
-	struct net_device *dev = pci_get_drvdata (pdev);
-	struct netdrv_private *np;
-
-	DPRINTK ("ENTER\n");
-
-	assert (dev != NULL);
-
-	np = dev->priv;
-	assert (np != NULL);
-
-	unregister_netdev (dev);
-
-#ifndef USE_IO_OPS
-	iounmap (np->mmio_addr);
-#endif /* !USE_IO_OPS */
-
-	pci_release_regions (pdev);
-
-#ifndef NETDRV_NDEBUG
-	/* poison memory before freeing */
-	memset (dev, 0xBC,
-		sizeof (struct net_device) +
-		sizeof (struct netdrv_private));
-#endif /* NETDRV_NDEBUG */
-
-	kfree (dev);
-
-	pci_set_drvdata (pdev, NULL);
-
-	pci_power_off (pdev, -1);
-
-	DPRINTK ("EXIT\n");
+	return dev;
 }
 
-
-/* Serial EEPROM section. */
-
-/*  EEPROM_Ctrl bits. */
-#define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
-#define EE_CS			0x08	/* EEPROM chip select. */
-#define EE_DATA_WRITE	0x02	/* EEPROM chip data in. */
-#define EE_WRITE_0		0x00
-#define EE_WRITE_1		0x02
-#define EE_DATA_READ	0x01	/* EEPROM chip data out. */
-#define EE_ENB			(0x80 | EE_CS)
+
+/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
+   These are often serial bit streams generated by the host processor.
+   The example below is for the common 93c46 EEPROM, 64 16 bit words. */
 
 /* Delay between EEPROM clock transitions.
-   No extra delay is needed with 33Mhz PCI, but 66Mhz may change this.
- */
+   This "delay" forces out buffered PCI writes.
+   Typically no extra delay is needed.
+   Note that pre-2.0.34 kernels had a cache-alignment bug that made
+   udelay() unreliable.
+*/
+#define eeprom_delay(ee_addr)	readl(ee_addr)
 
-#define eeprom_delay()	readl(ee_addr)
+/* Note carefully whether "DataIn" refers to the NIC or EEPROM viewpoint. */
+enum EEPROM_Ctrl_Bits {
+	EE_ShiftClk=0x01, EE_DataBit=0x02, EE_ChipSelect=0x04, EE_DataDir=0x08,
+};
+#define EE_Write0 (EE_DataDir | EE_ChipSelect)
+#define EE_Write1 (EE_DataDir | EE_ChipSelect | EE_DataBit)
 
-/* The EEPROM commands include the alway-set leading bit. */
-#define EE_WRITE_CMD	(5)
-#define EE_READ_CMD		(6)
-#define EE_ERASE_CMD	(7)
+/* The EEPROM commands always start with 01.. preamble bits.
+   Commands are prepended to the variable-length address. */
+enum EEPROM_Cmds { EE_WriteCmd=5, EE_ReadCmd=6, EE_EraseCmd=7, };
 
-static int __devinit read_eeprom (void *ioaddr, int location, int addr_len)
+static int eeprom_read(long addr, int location)
 {
 	int i;
-	unsigned retval = 0;
-	void *ee_addr = ioaddr + Cfg9346;
-	int read_cmd = location | (EE_READ_CMD << addr_len);
-
-	DPRINTK ("ENTER\n");
-
-	writeb (EE_ENB & ~EE_CS, ee_addr);
-	writeb (EE_ENB, ee_addr);
-	eeprom_delay ();
+	int retval = 0;
+	long ee_addr = addr + EECtrl;
+	int read_cmd = location | (EE_ReadCmd<<6);
 
+	writel(EE_DataDir, ee_addr);
 	/* Shift the read command bits out. */
-	for (i = 4 + addr_len; i >= 0; i--) {
-		int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
-		writeb (EE_ENB | dataval, ee_addr);
-		eeprom_delay ();
-		writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
-		eeprom_delay ();
+	for (i = 10; i >= 0; i--) {
+		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
+		writel(dataval, ee_addr);
+		eeprom_delay(ee_addr);
+		writel(dataval | EE_ShiftClk, ee_addr);
+		eeprom_delay(ee_addr);
 	}
-	writeb (EE_ENB, ee_addr);
-	eeprom_delay ();
+	writel(EE_ChipSelect, ee_addr);
+	eeprom_delay(ee_addr);
 
 	for (i = 16; i > 0; i--) {
-		writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
-		eeprom_delay ();
-		retval =
-		    (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
-				     0);
-		writeb (EE_ENB, ee_addr);
-		eeprom_delay ();
+		writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
+		eeprom_delay(ee_addr);
+		retval = (retval << 1) | ((readl(ee_addr) & EE_DataBit) ? 1 : 0);
+		writel(EE_ChipSelect, ee_addr);
+		eeprom_delay(ee_addr);
 	}
 
 	/* Terminate the EEPROM access. */
-	writeb (~EE_CS, ee_addr);
-	eeprom_delay ();
-
-	DPRINTK ("EXIT - returning %d\n", retval);
+	writel(EE_DataDir, ee_addr);
+	writel(0, ee_addr);
 	return retval;
 }
 
-/* MII serial management: mostly bogus for now. */
-/* Read and write the MII management registers using software-generated
-   serial MDIO protocol.
-   The maximum data clock rate is 2.5 Mhz.  The minimum timing is usually
-   met by back-to-back PCI I/O cycles, but we insert a delay to avoid
-   "overclocking" issues. */
-#define MDIO_DIR		0x80
-#define MDIO_DATA_OUT	0x04
-#define MDIO_DATA_IN	0x02
-#define MDIO_CLK		0x01
-#define MDIO_WRITE0 (MDIO_DIR)
-#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT)
-
-#define mdio_delay()	readb(mdio_addr)
-
-
-static char mii_2_8139_map[8] = {
-	BasicModeCtrl,
-	BasicModeStatus,
-	0,
-	0,
-	NWayAdvert,
-	NWayLPAR,
-	NWayExpansion,
-	0
-};
+/*  MII transceiver control section.
+	Read and write the MII registers using software-generated serial
+	MDIO protocol.  See the MII specifications or DP83840A data sheet
+	for details.
 
+	The maximum data clock rate is 2.5 Mhz.
+	The timing is decoupled from the processor clock by flushing the write
+	from the CPU write buffer with a following read, and using PCI
+	transaction time. */
+#define mdio_in(mdio_addr) readl(mdio_addr)
+#define mdio_out(value, mdio_addr) writel(value, mdio_addr)
+#define mdio_delay(mdio_addr) readl(mdio_addr)
 
-/* Syncronize the MII management interface by shifting 32 one bits out. */
-static void mdio_sync (void *mdio_addr)
-{
-	int i;
+enum mii_reg_bits {
+	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
+};
+#define MDIO_EnbIn  (0)
+#define MDIO_WRITE0 (MDIO_EnbOutput)
+#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
 
-	DPRINTK ("ENTER\n");
+/* Generate the preamble required for initial synchronization and
+   a few older transceivers. */
+static void mdio_sync(long mdio_addr)
+{
+	int bits = 32;
 
-	for (i = 32; i >= 0; i--) {
-		writeb (MDIO_WRITE1, mdio_addr);
-		mdio_delay ();
-		writeb (MDIO_WRITE1 | MDIO_CLK, mdio_addr);
-		mdio_delay ();
+	/* Establish sync by sending at least 32 logic ones. */
+	while (--bits >= 0) {
+		mdio_out(MDIO_WRITE1, mdio_addr);
+		mdio_delay(mdio_addr);
+		mdio_out(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
+		mdio_delay(mdio_addr);
 	}
-
-	DPRINTK ("EXIT\n");
 }
 
-
-static int mdio_read (struct net_device *dev, int phy_id, int location)
+static int mdio_read(struct net_device *dev, int phy_id, unsigned int location)
 {
-	struct netdrv_private *tp = dev->priv;
-	void *mdio_addr = tp->mmio_addr + Config4;
+	long mdio_addr = dev->base_addr + MIICtrl;
 	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
-	int retval = 0;
-	int i;
+	int i, retval = 0;
 
-	DPRINTK ("ENTER\n");
+	if (mii_preamble_required)
+		mdio_sync(mdio_addr);
 
-	if (phy_id > 31) {	/* Really a 8139.  Use internal registers. */
-		DPRINTK ("EXIT after directly using 8139 internal regs\n");
-		return location < 8 && mii_2_8139_map[location] ?
-		    readw (tp->mmio_addr + mii_2_8139_map[location]) : 0;
-	}
-	mdio_sync (mdio_addr);
 	/* Shift the read command bits out. */
 	for (i = 15; i >= 0; i--) {
-		int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0;
+		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
 
-		writeb (MDIO_DIR | dataval, mdio_addr);
-		mdio_delay ();
-		writeb (MDIO_DIR | dataval | MDIO_CLK, mdio_addr);
-		mdio_delay ();
+		mdio_out(dataval, mdio_addr);
+		mdio_delay(mdio_addr);
+		mdio_out(dataval | MDIO_ShiftClk, mdio_addr);
+		mdio_delay(mdio_addr);
 	}
-
 	/* Read the two transition, 16 data, and wire-idle bits. */
 	for (i = 19; i > 0; i--) {
-		writeb (0, mdio_addr);
-		mdio_delay ();
-		retval =
-		    (retval << 1) | ((readb (mdio_addr) & MDIO_DATA_IN) ? 1
-				     : 0);
-		writeb (MDIO_CLK, mdio_addr);
-		mdio_delay ();
+		mdio_out(MDIO_EnbIn, mdio_addr);
+		mdio_delay(mdio_addr);
+		retval = (retval << 1) | ((mdio_in(mdio_addr) & MDIO_Data) ? 1 : 0);
+		mdio_out(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+		mdio_delay(mdio_addr);
 	}
-
-	DPRINTK ("EXIT, returning %d\n", (retval >> 1) & 0xffff);
-	return (retval >> 1) & 0xffff;
+	return (retval>>1) & 0xffff;
 }
 
-
-static void mdio_write (struct net_device *dev, int phy_id, int location,
-			int value)
+static void mdio_write(struct net_device *dev, int phy_id,
+					   unsigned int location, int value)
 {
-	struct netdrv_private *tp = dev->priv;
-	void *mdio_addr = tp->mmio_addr + Config4;
-	int mii_cmd =
-	    (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
+	long mdio_addr = dev->base_addr + MIICtrl;
+	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
 	int i;
 
-	DPRINTK ("ENTER\n");
-
-	if (phy_id > 31) {	/* Really a 8139.  Use internal registers. */
-		if (location < 8 && mii_2_8139_map[location]) {
-			writew (value,
-				tp->mmio_addr + mii_2_8139_map[location]);
-			readw (tp->mmio_addr + mii_2_8139_map[location]);
-		}
-		DPRINTK ("EXIT after directly using 8139 internal regs\n");
-		return;
-	}
-	mdio_sync (mdio_addr);
+	if (mii_preamble_required)
+		mdio_sync(mdio_addr);
 
 	/* Shift the command bits out. */
 	for (i = 31; i >= 0; i--) {
-		int dataval =
-		    (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
-		writeb (dataval, mdio_addr);
-		mdio_delay ();
-		writeb (dataval | MDIO_CLK, mdio_addr);
-		mdio_delay ();
-	}
+		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
 
+		mdio_out(dataval, mdio_addr);
+		mdio_delay(mdio_addr);
+		mdio_out(dataval | MDIO_ShiftClk, mdio_addr);
+		mdio_delay(mdio_addr);
+	}
 	/* Clear out extra bits. */
 	for (i = 2; i > 0; i--) {
-		writeb (0, mdio_addr);
-		mdio_delay ();
-		writeb (MDIO_CLK, mdio_addr);
-		mdio_delay ();
+		mdio_out(MDIO_EnbIn, mdio_addr);
+		mdio_delay(mdio_addr);
+		mdio_out(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+		mdio_delay(mdio_addr);
 	}
-
-	DPRINTK ("EXIT\n");
+	return;
 }
 
-
-static int netdrv_open (struct net_device *dev)
+
+static int netdev_open(struct net_device *dev)
 {
-	struct netdrv_private *tp = dev->priv;
-	int retval;
-#ifdef NETDRV_DEBUG
-	void *ioaddr = tp->mmio_addr;
-#endif
-
-	DPRINTK ("ENTER\n");
-
-	retval = request_irq (dev->irq, netdrv_interrupt, SA_SHIRQ, dev->name, dev);
-	if (retval) {
-		DPRINTK ("EXIT, returning %d\n", retval);
-		return retval;
-	}
-
-	tp->tx_bufs = pci_alloc_consistent(tp->pci_dev, TX_BUF_TOT_LEN,
-					   &tp->tx_bufs_dma);
-	tp->rx_ring = pci_alloc_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
-					   &tp->rx_ring_dma);
-	if (tp->tx_bufs == NULL || tp->rx_ring == NULL) {
-		free_irq(dev->irq, dev);
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int i;
 
-		if (tp->tx_bufs)
-			pci_free_consistent(tp->pci_dev, TX_BUF_TOT_LEN,
-					    tp->tx_bufs, tp->tx_bufs_dma);
-		if (tp->rx_ring)
-			pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
-					    tp->rx_ring, tp->rx_ring_dma);
+	/* Some chips may need to be reset. */
 
-		DPRINTK ("EXIT, returning -ENOMEM\n");
-		return -ENOMEM;
+	MOD_INC_USE_COUNT;
 
+	/* Note that both request_irq() and init_ring() call kmalloc(), which
+	   break the global kernel lock protecting this routine. */
+	if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
+		MOD_DEC_USE_COUNT;
+		return -EAGAIN;
 	}
 
-	tp->full_duplex = tp->duplex_lock;
-	tp->tx_flag = (TX_FIFO_THRESH << 11) & 0x003f0000;
+	if (np->msg_level & NETIF_MSG_IFUP)
+		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
+			   dev->name, dev->irq);
 
-	netdrv_init_ring (dev);
-	netdrv_hw_start (dev);
+	init_ring(dev);
 
-	DPRINTK ("%s: netdrv_open() ioaddr %#lx IRQ %d"
-			" GP Pins %2.2x %s-duplex.\n",
-			dev->name, pci_resource_start (tp->pci_dev, 1),
-			dev->irq, NETDRV_R8 (MediaStatus),
-			tp->full_duplex ? "full" : "half");
+#if ADDRLEN == 64
+	writel(virt_to_bus(np->rx_ring) >> 32, ioaddr + RxRingPtr + 4);
+	writel(virt_to_bus(np->tx_ring) >> 32, ioaddr + TxRingPtr + 4);
+#endif
+	writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
+	writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
 
-	/* Set the timer to switch to check for link beat and perhaps switch
-	   to an alternate media type. */
-	init_timer (&tp->timer);
-	tp->timer.expires = jiffies + 3 * HZ;
-	tp->timer.data = (unsigned long) dev;
-	tp->timer.function = &netdrv_timer;
-	add_timer (&tp->timer);
+	for (i = 0; i < 6; i++)
+		writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
+#if 0
+	/* Or, if Address register must be written as words. */
+	writel(cpu_to_le32(cpu_to_le32(get_unaligned((u32 *)dev->dev_addr))),
+					   ioaddr + StationAddr);
+	writel(cpu_to_le16(cpu_to_le16(get_unaligned((u16 *)(dev->dev_addr+4)))),
+					   ioaddr + StationAddr + 4);
+#endif
+
+	/* Initialize other registers. */
+	/* Configure the PCI bus bursts and FIFO thresholds. */
+	writel(0x0000, ioaddr + PCIBusCfg);
+	writel(0x0000, ioaddr + FIFOCfg);
+
+	if (dev->if_port == 0)
+		dev->if_port = np->default_port;
+
+	np->in_interrupt = 0;
+
+	set_rx_mode(dev);
+	netif_start_tx_queue(dev);
+
+	/* Enable interrupts by setting the interrupt mask. */
+	np->intr_enable = IntrRxDone | IntrRxEmpty | IntrRxPCIErr |
+		IntrTxDone | IntrTxEmpty | IntrTxPCIErr | StatsMax | LinkChange;
+	writel(np->intr_enable, ioaddr + IntrEnable);
+
+	writel(RxEnable, dev->base_addr + RxCmd);
+
+	if (np->msg_level & NETIF_MSG_IFUP)
+		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x.\n",
+			   dev->name, (int)readl(ioaddr + RxStatus),
+			   (int)readl(ioaddr + TxStatus));
+
+	/* Set the timer to check for link beat. */
+	init_timer(&np->timer);
+	np->timer.expires = jiffies + 3*HZ;
+	np->timer.data = (unsigned long)dev;
+	np->timer.function = &netdev_timer;				/* timer handler */
+	add_timer(&np->timer);
 
-	DPRINTK ("EXIT, returning 0\n");
 	return 0;
 }
 
-
-/* Start the hardware at open or resume. */
-static void netdrv_hw_start (struct net_device *dev)
+/* This is only needed if the chip supports >1500 byte frames.
+   Changing the MTU while active is usually race prone or impossible, thus
+   no configuration relies on the capability.
+ */
+static int change_mtu(struct net_device *dev, int new_mtu)
 {
-	struct netdrv_private *tp = dev->priv;
-	void *ioaddr = tp->mmio_addr;
-	u32 i;
-	u8 tmp;
-
-	DPRINTK ("ENTER\n");
-
-	/* Soft reset the chip. */
-	NETDRV_W8 (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear) | CmdReset);
-	udelay (100);
-
-	/* Check that the chip has finished the reset. */
-	for (i = 1000; i > 0; i--)
-		if ((NETDRV_R8 (ChipCmd) & CmdReset) == 0)
-			break;
-
-	/* Restore our idea of the MAC address. */
-	NETDRV_W32_F (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
-	NETDRV_W32_F (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));
-
-	/* Must enable Tx/Rx before setting transfer thresholds! */
-	NETDRV_W8_F (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear) |
-			   CmdRxEnb | CmdTxEnb);
-
-	i = netdrv_rx_config |
-	    (NETDRV_R32 (RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
-	NETDRV_W32_F (RxConfig, i);
-
-	/* Check this value: the documentation for IFG contradicts ifself. */
-	NETDRV_W32 (TxConfig, (TX_DMA_BURST << TxDMAShift));
-
-	/* unlock Config[01234] and BMCR register writes */
-	NETDRV_W8_F (Cfg9346, Cfg9346_Unlock);
-	udelay (10);
-
-	tp->cur_rx = 0;
-
-	/* Lock Config[01234] and BMCR register writes */
-	NETDRV_W8_F (Cfg9346, Cfg9346_Lock);
-	udelay (10);
-
-	/* init Rx ring buffer DMA address */
-	NETDRV_W32_F (RxBuf, tp->rx_ring_dma);
-
-	/* init Tx buffer DMA addresses */
-	for (i = 0; i < NUM_TX_DESC; i++)
-		NETDRV_W32_F (TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs));
-
-	NETDRV_W32_F (RxMissed, 0);
-
-	netdrv_set_rx_mode (dev);
-
-	/* no early-rx interrupts */
-	NETDRV_W16 (MultiIntr, NETDRV_R16 (MultiIntr) & MultiIntrClear);
-
-	/* make sure RxTx has started */
-	NETDRV_W8_F (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear) |
-			   CmdRxEnb | CmdTxEnb);
-
-	/* Enable all known interrupts by setting the interrupt mask. */
-	NETDRV_W16_F (IntrMask, netdrv_intr_mask);
-
-	netif_start_queue (dev);
-
-	DPRINTK ("EXIT\n");
+	if ((new_mtu < 68) || (new_mtu > 1500))
+		return -EINVAL;
+	if (netif_running(dev))
+		return -EBUSY;
+	dev->mtu = new_mtu;
+	return 0;
 }
 
-
-/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
-static void netdrv_init_ring (struct net_device *dev)
+static void check_duplex(struct net_device *dev)
 {
-	struct netdrv_private *tp = dev->priv;
-	int i;
-
-	DPRINTK ("ENTER\n");
-
-	tp->cur_rx = 0;
-	atomic_set (&tp->cur_tx, 0);
-	atomic_set (&tp->dirty_tx, 0);
-
-	for (i = 0; i < NUM_TX_DESC; i++) {
-		tp->tx_info[i].skb = NULL;
-		tp->tx_info[i].mapping = 0;
-		tp->tx_buf[i] = &tp->tx_bufs[i * TX_BUF_SIZE];
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int old_tx_mode = np->tx_config;
+
+	if (np->medialock) {
+		if (np->full_duplex)
+			np->tx_config |= 1;
+	} else {
+		int mii_reg5 = mdio_read(dev, np->phys[0], 5);
+		int negotiated = mii_reg5 & np->advertising;
+		int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
+		if (np->duplex_lock  ||  mii_reg5 == 0xffff)
+			return;
+		if (duplex)
+			np->tx_config |= TxModeFDX;
+		else
+			np->tx_config &= ~TxModeFDX;
+		if (np->full_duplex != duplex) {
+			np->full_duplex = duplex;
+			if (np->msg_level & NETIF_MSG_LINK)
+				printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d"
+					   " negotiated capability %4.4x.\n", dev->name,
+					   duplex ? "full" : "half", np->phys[0], negotiated);
+		}
 	}
-
-	DPRINTK ("EXIT\n");
+	if (old_tx_mode != np->tx_config)
+		writew(np->tx_config, ioaddr + TxMode);
 }
 
-
-static void netdrv_timer (unsigned long data)
+static void netdev_timer(unsigned long data)
 {
-	struct net_device *dev = (struct net_device *) data;
-	struct netdrv_private *tp = dev->priv;
-	void *ioaddr = tp->mmio_addr;
-	int next_tick = 60 * HZ;
-	int mii_lpa;
-
-	mii_lpa = mdio_read (dev, tp->phys[0], MII_LPA);
-
-	if (!tp->duplex_lock && mii_lpa != 0xffff) {
-		int duplex = (mii_lpa & LPA_100FULL)
-		    || (mii_lpa & 0x01C0) == 0x0040;
-		if (tp->full_duplex != duplex) {
-			tp->full_duplex = duplex;
-			printk (KERN_INFO
-				"%s: Setting %s-duplex based on MII #%d link"
-				" partner ability of %4.4x.\n", dev->name,
-				tp->full_duplex ? "full" : "half",
-				tp->phys[0], mii_lpa);
-			NETDRV_W8 (Cfg9346, Cfg9346_Unlock);
-			NETDRV_W8 (Config1, tp->full_duplex ? 0x60 : 0x20);
-			NETDRV_W8 (Cfg9346, Cfg9346_Lock);
-		}
+	struct net_device *dev = (struct net_device *)data;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int next_tick = 10*HZ;
+
+	if (np->msg_level & NETIF_MSG_TIMER) {
+		printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x, "
+			   "Tx %x Rx %x.\n",
+			   dev->name, (int)readl(ioaddr + IntrStatus),
+			   (int)readl(ioaddr + TxStatus), (int)readl(ioaddr + RxStatus));
+	}
+	/* This will either have a small false-trigger window or will not catch
+	   tbusy incorrectly set when the queue is empty. */
+	if (netif_queue_paused(dev)  &&
+		np->cur_tx - np->dirty_tx > 1  &&
+		(jiffies - dev->trans_start) > TX_TIMEOUT) {
+		tx_timeout(dev);
+	}
+	check_duplex(dev);
+	np->timer.expires = jiffies + next_tick;
+	add_timer(&np->timer);
+}
+
+static void tx_timeout(struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+
+	printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
+		   " resetting...\n", dev->name, (int)readl(ioaddr + TxStatus));
+
+#ifndef __alpha__
+	if (np->msg_level & NETIF_MSG_TX_ERR) {
+		int i;
+		printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
+		for (i = 0; i < RX_RING_SIZE; i++)
+			printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
+		printk("\n"KERN_DEBUG"  Tx ring %p: ", np->tx_ring);
+		for (i = 0; i < TX_RING_SIZE; i++)
+			printk(" %8.8x", np->tx_ring[i].status);
+		printk("\n");
 	}
+#endif
+
+	/* Perhaps we should reinitialize the hardware here. */
+	dev->if_port = 0;
+	/* Stop and restart the chip's Tx processes. */
 
-	DPRINTK ("%s: Media selection tick, Link partner %4.4x.\n",
-		 dev->name, NETDRV_R16 (NWayLPAR));
-	DPRINTK ("%s:  Other registers are IntMask %4.4x IntStatus %4.4x"
-		 " RxStatus %4.4x.\n", dev->name,
-		 NETDRV_R16 (IntrMask),
-		 NETDRV_R16 (IntrStatus),
-		 NETDRV_R32 (RxEarlyStatus));
-	DPRINTK ("%s:  Chip config %2.2x %2.2x.\n",
-		 dev->name, NETDRV_R8 (Config0),
-		 NETDRV_R8 (Config1));
+	/* Trigger an immediate transmit demand. */
 
-	tp->timer.expires = jiffies + next_tick;
-	add_timer (&tp->timer);
+	dev->trans_start = jiffies;
+	np->stats.tx_errors++;
+	return;
 }
 
 
-static void netdrv_tx_clear (struct netdrv_private *tp)
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void init_ring(struct net_device *dev)
 {
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
 	int i;
 
-	atomic_set (&tp->cur_tx, 0);
-	atomic_set (&tp->dirty_tx, 0);
+	np->tx_full = 0;
+	np->cur_rx = np->cur_tx = 0;
+	np->dirty_rx = np->dirty_tx = 0;
+
+	/* Use 1518/+18 if the CRC is transferred. */
+	np->rx_buf_sz = (dev->mtu <= 1522 ? PKT_BUF_SZ : dev->mtu + 14);
+	np->rx_head_desc = &np->rx_ring[0];
+
+	/* Initialize all Rx descriptors. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		np->rx_ring[i].length = cpu_to_le32(np->rx_buf_sz);
+		np->rx_ring[i].status = 0;
+		/* np->rx_ring[i].next_desc = virt_to_le32desc(&np->rx_ring[i+1]);*/
+		np->rx_skbuff[i] = 0;
+	}
+	/* Mark the last entry as wrapping the ring. */
+	np->rx_ring[i-1].status |= cpu_to_le32(DescEndRing);
+	/* Or np->rx_ring[i-1].next_desc = virt_to_le32desc(&np->rx_ring[0]);*/
+
+	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+		np->rx_skbuff[i] = skb;
+		if (skb == NULL)
+			break;
+		skb->dev = dev;			/* Mark as being used by this device. */
+		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
+		np->rx_ring[i].addr = virt_to_le32desc(skb->tail);
+		np->rx_ring[i].status = cpu_to_le32(DescOwn | DescIntr);
+	}
+	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
 
-	/* Dump the unsent Tx packets. */
-	for (i = 0; i < NUM_TX_DESC; i++) {
-		struct ring_info *rp = &tp->tx_info[i];
-		if (rp->mapping != 0) {
-			pci_unmap_single (tp->pci_dev, rp->mapping,
-					  rp->skb->len, PCI_DMA_TODEVICE);
-			rp->mapping = 0;
-		}
-		if (rp->skb) {
-			dev_kfree_skb (rp->skb);
-			rp->skb = NULL;
-			tp->stats.tx_dropped++;
-		}
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		np->tx_skbuff[i] = 0;
+		np->tx_ring[i].status = 0;
+		/* Or np->tx_ring[i].next_desc = virt_to_le32desc(&np->tx_ring[i+1]);*/
 	}
+	/* Or np->tx_ring[i-1].next_desc = virt_to_le32desc(&np->tx_ring[0]); */
+	return;
 }
 
-
-static void netdrv_tx_timeout (struct net_device *dev)
+static int start_tx(struct sk_buff *skb, struct net_device *dev)
 {
-	struct netdrv_private *tp = dev->priv;
-	void *ioaddr = tp->mmio_addr;
-	int i;
-	u8 tmp8;
-	unsigned long flags;
-
-	DPRINTK ("%s: Transmit timeout, status %2.2x %4.4x "
-		 "media %2.2x.\n", dev->name,
-		 NETDRV_R8 (ChipCmd),
-		 NETDRV_R16 (IntrStatus),
-		 NETDRV_R8 (MediaStatus));
-
-	/* disable Tx ASAP, if not already */
-	tmp8 = NETDRV_R8 (ChipCmd);
-	if (tmp8 & CmdTxEnb)
-		NETDRV_W8 (ChipCmd, tmp8 & ~CmdTxEnb);
-
-	/* Disable interrupts by clearing the interrupt mask. */
-	NETDRV_W16 (IntrMask, 0x0000);
-
-	/* Emit info to figure out what went wrong. */
-	printk (KERN_DEBUG "%s: Tx queue start entry %d  dirty entry %d.\n",
-		dev->name, atomic_read (&tp->cur_tx),
-		atomic_read (&tp->dirty_tx));
-	for (i = 0; i < NUM_TX_DESC; i++)
-		printk (KERN_DEBUG "%s:  Tx descriptor %d is %8.8lx.%s\n",
-			dev->name, i, NETDRV_R32 (TxStatus0 + (i * 4)),
-			i == atomic_read (&tp->dirty_tx) % NUM_TX_DESC ?
-				" (queue head)" : "");
-
-	/* Stop a shared interrupt from scavenging while we are. */
-	spin_lock_irqsave (&tp->lock, flags);
-	
-	netdrv_tx_clear (tp);
-
-	spin_unlock_irqrestore (&tp->lock, flags);
-
-	/* ...and finally, reset everything */
-	netdrv_hw_start (dev);
-
-	netif_wake_queue (dev);
-}
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	unsigned entry;
 
+	/* Block a timer-based transmit from overlapping.  This happens when
+	   packets are presumed lost, and we use this to check the Tx status. */
+	if (netif_pause_tx_queue(dev) != 0) {
+		/* This watchdog code is redundant with the media monitor timer. */
+		if (jiffies - dev->trans_start > TX_TIMEOUT)
+			tx_timeout(dev);
+		return 1;
+	}
 
-
-static int netdrv_start_xmit (struct sk_buff *skb, struct net_device *dev)
-{
-	struct netdrv_private *tp = dev->priv;
-	void *ioaddr = tp->mmio_addr;
-	int entry;
+	/* Note: Ordering is important here, set the field with the
+	   "ownership" bit last, and only then increment cur_tx. */
 
 	/* Calculate the next Tx descriptor entry. */
-	entry = atomic_read (&tp->cur_tx) % NUM_TX_DESC;
+	entry = np->cur_tx % TX_RING_SIZE;
 
-	assert (tp->tx_info[entry].skb == NULL);
-	assert (tp->tx_info[entry].mapping == 0);
+	np->tx_skbuff[entry] = skb;
 
-	tp->tx_info[entry].skb = skb;
-	/* tp->tx_info[entry].mapping = 0; */
-	memcpy (tp->tx_buf[entry], skb->data, skb->len);
-
-	/* Note: the chip doesn't have auto-pad! */
-	NETDRV_W32 (TxStatus0 + (entry * sizeof(u32)),
-		 tp->tx_flag | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
+	np->tx_ring[entry].addr = virt_to_le32desc(skb->data);
+	np->tx_ring[entry].length = cpu_to_le32(skb->len);
+	if (entry >= TX_RING_SIZE-1)		 /* Wrap ring */
+		np->tx_ring[entry].status =
+			cpu_to_le32(DescOwn|DescEndPacket|DescEndRing);
+	else
+		np->tx_ring[entry].status = cpu_to_le32(DescOwn|DescEndPacket);
+	np->cur_tx++;
+
+	/* On some architectures: explicitly flush cache lines here. */
+
+	if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
+		np->tx_full = 1;
+		/* Check for a just-cleared queue. */
+		if (np->cur_tx - (volatile unsigned int)np->dirty_tx
+			< TX_QUEUE_LEN - 2) {
+			np->tx_full = 0;
+			netif_unpause_tx_queue(dev);
+		} else
+			netif_stop_tx_queue(dev);
+	} else
+		netif_unpause_tx_queue(dev);		/* Typical path */
+	/* Wake the potentially-idle transmit channel. */
+	writel(TxPoll, dev->base_addr + TxCmd);
 
 	dev->trans_start = jiffies;
-	atomic_inc (&tp->cur_tx);
-	if ((atomic_read (&tp->cur_tx) - atomic_read (&tp->dirty_tx)) >= NUM_TX_DESC)
-		netif_stop_queue (dev);
-
-	DPRINTK ("%s: Queued Tx packet at %p size %u to slot %d.\n",
-		 dev->name, skb->data, skb->len, entry);
 
+	if (np->msg_level & NETIF_MSG_TX_QUEUED) {
+		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
+			   dev->name, np->cur_tx, entry);
+	}
 	return 0;
 }
 
+/* The interrupt handler does all of the Rx thread work and cleans up
+   after the Tx thread. */
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+	struct net_device *dev = (struct net_device *)dev_instance;
+	struct netdev_private *np;
+	long ioaddr;
+	int boguscnt;
 
-static void netdrv_tx_interrupt (struct net_device *dev,
-				  struct netdrv_private *tp,
-				  void *ioaddr)
-{
-	int cur_tx, dirty_tx, tx_left;
-
-	assert (dev != NULL);
-	assert (tp != NULL);
-	assert (ioaddr != NULL);
-
-	dirty_tx = atomic_read (&tp->dirty_tx);
-
-	cur_tx = atomic_read (&tp->cur_tx);
-	tx_left = cur_tx - dirty_tx;
-	while (tx_left > 0) {
-		int entry = dirty_tx % NUM_TX_DESC;
-		int txstatus;
-
-		txstatus = NETDRV_R32 (TxStatus0 + (entry * sizeof (u32)));
-
-		if (!(txstatus & (TxStatOK | TxUnderrun | TxAborted)))
-			break;	/* It still hasn't been Txed */
-
-		/* Note: TxCarrierLost is always asserted at 100mbps. */
-		if (txstatus & (TxOutOfWindow | TxAborted)) {
-			/* There was an major error, log it. */
-			DPRINTK ("%s: Transmit error, Tx status %8.8x.\n",
-				 dev->name, txstatus);
-			tp->stats.tx_errors++;
-			if (txstatus & TxAborted) {
-				tp->stats.tx_aborted_errors++;
-				NETDRV_W32 (TxConfig, TxClearAbt | (TX_DMA_BURST << TxDMAShift));
-			}
-			if (txstatus & TxCarrierLost)
-				tp->stats.tx_carrier_errors++;
-			if (txstatus & TxOutOfWindow)
-				tp->stats.tx_window_errors++;
-		} else {
-			if (txstatus & TxUnderrun) {
-				/* Add 64 to the Tx FIFO threshold. */
-				if (tp->tx_flag < 0x00300000)
-					tp->tx_flag += 0x00020000;
-				tp->stats.tx_fifo_errors++;
-			}
-			tp->stats.collisions += (txstatus >> 24) & 15;
-			tp->stats.tx_bytes += txstatus & 0x7ff;
-			tp->stats.tx_packets++;
-		}
-
-		/* Free the original skb. */
-		if (tp->tx_info[entry].mapping != 0) {
-			pci_unmap_single(tp->pci_dev,
-					 tp->tx_info[entry].mapping,
-					 tp->tx_info[entry].skb->len,
-					 PCI_DMA_TODEVICE);
-			tp->tx_info[entry].mapping = 0;
-		}
-		dev_kfree_skb_irq (tp->tx_info[entry].skb);
-		tp->tx_info[entry].skb = NULL;
-		dirty_tx++;
-		if (dirty_tx < 0) { /* handle signed int overflow */
-			atomic_sub (cur_tx, &tp->cur_tx); /* XXX racy? */
-			dirty_tx = cur_tx - tx_left + 1;
-		}
-		if (netif_queue_stopped (dev))
-			netif_wake_queue (dev);
-
-		cur_tx = atomic_read (&tp->cur_tx);
-		tx_left = cur_tx - dirty_tx;
-
+#ifndef final_version			/* Can never occur. */
+	if (dev == NULL) {
+		printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown "
+				"device.\n", irq);
+		return;
 	}
+#endif
 
-#ifndef NETDRV_NDEBUG
-	if (atomic_read (&tp->cur_tx) - dirty_tx > NUM_TX_DESC) {
-		printk (KERN_ERR
-		  "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
-		     dev->name, dirty_tx, atomic_read (&tp->cur_tx));
-		dirty_tx += NUM_TX_DESC;
+	ioaddr = dev->base_addr;
+	np = (struct netdev_private *)dev->priv;
+	boguscnt = np->max_interrupt_work;
+
+#if defined(__i386__)  &&  LINUX_VERSION_CODE < 0x020300
+	/* A lock to prevent simultaneous entry bug on Intel SMP machines. */
+	if (test_and_set_bit(0, (void*)&dev->interrupt)) {
+		printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
+			   dev->name);
+		dev->interrupt = 0;	/* Avoid halting machine. */
+		return;
 	}
-#endif /* NETDRV_NDEBUG */
+#endif
 
-	atomic_set (&tp->dirty_tx, dirty_tx);
-}
+	do {
+		u32 intr_status = readl(ioaddr + IntrStatus);
 
+		/* Acknowledge all of the current interrupt sources ASAP. */
+		writel(intr_status & 0x0000ffff, ioaddr + IntrStatus);
 
-/* TODO: clean this up!  Rx reset need not be this intensive */
-static void netdrv_rx_err (u32 rx_status, struct net_device *dev,
-			    struct netdrv_private *tp, void *ioaddr)
-{
-	u8 tmp8;
-	int tmp_work = 1000;
+		if (np->msg_level & NETIF_MSG_INTR)
+			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
+				   dev->name, intr_status);
 
-	DPRINTK ("%s: Ethernet frame had errors, status %8.8x.\n",
-	         dev->name, rx_status);
-	if (rx_status & RxTooLong) {
-		DPRINTK ("%s: Oversized Ethernet frame, status %4.4x!\n",
-			 dev->name, rx_status);
-		/* A.C.: The chip hangs here. */
-	}
-	tp->stats.rx_errors++;
-	if (rx_status & (RxBadSymbol | RxBadAlign))
-		tp->stats.rx_frame_errors++;
-	if (rx_status & (RxRunt | RxTooLong))
-		tp->stats.rx_length_errors++;
-	if (rx_status & RxCRCErr)
-		tp->stats.rx_crc_errors++;
-	/* Reset the receiver, based on RealTek recommendation. (Bug?) */
-	tp->cur_rx = 0;
+		if (intr_status == 0 || intr_status == 0xffffffff)
+			break;
 
-	/* disable receive */
-	tmp8 = NETDRV_R8 (ChipCmd) & ChipCmdClear;
-	NETDRV_W8_F (ChipCmd, tmp8 | CmdTxEnb);
+		if (intr_status & IntrRxDone)
+			netdev_rx(dev);
 
-	/* A.C.: Reset the multicast list. */
-	netdrv_set_rx_mode (dev);
+		for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+			int entry = np->dirty_tx % TX_RING_SIZE;
+			int tx_status = le32_to_cpu(np->tx_ring[entry].status);
+			if (tx_status & DescOwn)
+				break;
+			if (np->msg_level & NETIF_MSG_TX_DONE)
+				printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
+					   dev->name, tx_status);
+			if (tx_status & (TxErrAbort | TxErrCarrier | TxErrLate
+							 | TxErr16Colls | TxErrHeartbeat)) {
+				if (np->msg_level & NETIF_MSG_TX_ERR)
+					printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+						   dev->name, tx_status);
+				np->stats.tx_errors++;
+				if (tx_status & TxErrCarrier) np->stats.tx_carrier_errors++;
+				if (tx_status & TxErrLate) np->stats.tx_window_errors++;
+				if (tx_status & TxErrHeartbeat) np->stats.tx_heartbeat_errors++;
+#ifdef ETHER_STATS
+				if (tx_status & TxErr16Colls) np->stats.collisions16++;
+				if (tx_status & TxErrAbort) np->stats.tx_aborted_errors++;
+#else
+				if (tx_status & (TxErr16Colls|TxErrAbort))
+					np->stats.tx_aborted_errors++;
+#endif
+			} else {
+				np->stats.tx_packets++;
+				np->stats.collisions += tx_status & TxColls;
+#if LINUX_VERSION_CODE > 0x20127
+				np->stats.tx_bytes += np->tx_skbuff[entry]->len;
+#endif
+#ifdef ETHER_STATS
+				if (tx_status & TxErrDefer) np->stats.tx_deferred++;
+#endif
+			}
+			/* Free the original skb. */
+			dev_free_skb_irq(np->tx_skbuff[entry]);
+			np->tx_skbuff[entry] = 0;
+		}
+		/* Note the 4 slot hysteresis to mark the queue non-full. */
+		if (np->tx_full  &&  np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+			/* The ring is no longer full, allow new TX entries. */
+			np->tx_full = 0;
+			netif_resume_tx_queue(dev);
+		}
 
-	/* XXX potentially temporary hack to
-	 * restart hung receiver */
-	while (--tmp_work > 0) {
-		tmp8 = NETDRV_R8 (ChipCmd);
-		if ((tmp8 & CmdRxEnb) && (tmp8 & CmdTxEnb))
+		/* Abnormal error summary/uncommon events handlers. */
+		if (intr_status & (IntrTxPCIErr | IntrRxPCIErr | LinkChange | StatsMax))
+			netdev_error(dev, intr_status);
+
+		if (--boguscnt < 0) {
+			printk(KERN_WARNING "%s: Too much work at interrupt, "
+				   "status=0x%4.4x.\n",
+				   dev->name, intr_status);
+			np->restore_intr_enable = 1;
 			break;
-		NETDRV_W8_F (ChipCmd,
-			  (tmp8 & ChipCmdClear) | CmdRxEnb | CmdTxEnb);
-	}
+		}
+	} while (1);
 
-	/* G.S.: Re-enable receiver */
-	/* XXX temporary hack to work around receiver hang */
-	netdrv_set_rx_mode (dev);
+	if (np->msg_level & NETIF_MSG_INTR)
+		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+			   dev->name, (int)readl(ioaddr + IntrStatus));
 
-	if (tmp_work <= 0)
-		printk (KERN_WARNING PFX "tx/rx enable wait too long\n");
+#if defined(__i386__)  &&  LINUX_VERSION_CODE < 0x020300
+	clear_bit(0, (void*)&dev->interrupt);
+#endif
+	return;
 }
 
-
-/* The data sheet doesn't describe the Rx ring at all, so I'm guessing at the
-   field alignments and semantics. */
-static void netdrv_rx_interrupt (struct net_device *dev,
-				  struct netdrv_private *tp, void *ioaddr)
+/* This routine is logically part of the interrupt handler, but separated
+   for clarity and better register allocation. */
+static int netdev_rx(struct net_device *dev)
 {
-	unsigned char *rx_ring;
-	u16 cur_rx;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	int entry = np->cur_rx % RX_RING_SIZE;
+	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
 
-	assert (dev != NULL);
-	assert (tp != NULL);
-	assert (ioaddr != NULL);
-
-	rx_ring = tp->rx_ring;
-	cur_rx = tp->cur_rx;
-
-	DPRINTK ("%s: In netdrv_rx(), current %4.4x BufAddr %4.4x,"
-		 " free to %4.4x, Cmd %2.2x.\n", dev->name, cur_rx,
-		 NETDRV_R16 (RxBufAddr),
-		 NETDRV_R16 (RxBufPtr), NETDRV_R8 (ChipCmd));
+	if (np->msg_level & NETIF_MSG_RX_STATUS) {
+		printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
+			   entry, np->rx_ring[entry].status);
+	}
 
-	while ((NETDRV_R8 (ChipCmd) & RxBufEmpty) == 0) {
-		int ring_offset = cur_rx % RX_BUF_LEN;
-		u32 rx_status;
-		unsigned int rx_size;
-		unsigned int pkt_size;
-		struct sk_buff *skb;
+	/* If EOP is set on the next entry, it's a new packet. Send it up. */
+	while ( ! (np->rx_head_desc->status & cpu_to_le32(DescOwn))) {
+		struct netdev_desc *desc = np->rx_head_desc;
+		u32 desc_status = le32_to_cpu(desc->status);
+		int data_size = le32_to_cpu(desc->length);
 
-		/* read size+status of next frame from DMA ring buffer */
-		rx_status = le32_to_cpu (*(u32 *) (rx_ring + ring_offset));
-		rx_size = rx_status >> 16;
-		pkt_size = rx_size - 4;
-
-		DPRINTK ("%s:  netdrv_rx() status %4.4x, size %4.4x,"
-			 " cur %4.4x.\n", dev->name, rx_status,
-			 rx_size, cur_rx);
-#if NETDRV_DEBUG > 2
-		{
-			int i;
-			DPRINTK ("%s: Frame contents ", dev->name);
-			for (i = 0; i < 70; i++)
-				printk (" %2.2x",
-					rx_ring[ring_offset + i]);
-			printk (".\n");
-		}
+		if (np->msg_level & NETIF_MSG_RX_STATUS)
+			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
+				   desc_status);
+		if (--boguscnt < 0)
+			break;
+		if ((desc_status & RxDescWholePkt) != RxDescWholePkt) {
+			/* Select a message. */
+			printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
+				   "multiple buffers, entry %#x length %d status %4.4x!\n",
+				   dev->name, np->cur_rx, data_size, desc_status);
+			printk(KERN_WARNING "%s: Oversized Ethernet frame %p vs %p.\n",
+				   dev->name, np->rx_head_desc,
+				   &np->rx_ring[np->cur_rx % RX_RING_SIZE]);
+			printk(KERN_WARNING "%s: Oversized Ethernet frame -- next status %x/%x last status %x.\n",
+				   dev->name,
+				   np->rx_ring[(np->cur_rx+1) % RX_RING_SIZE].status,
+				   np->rx_ring[(np->cur_rx+1) % RX_RING_SIZE].length,
+				   np->rx_ring[(np->cur_rx-1) % RX_RING_SIZE].status);
+			np->stats.rx_length_errors++;
+		} else if (desc_status & RxDescErrSum) {
+			/* There was a error. */
+			if (np->msg_level & NETIF_MSG_RX_ERR)
+				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
+					   desc_status);
+			np->stats.rx_errors++;
+			if (desc_status & (RxErrLong|RxErrRunt))
+				np->stats.rx_length_errors++;
+			if (desc_status & (RxErrFrame|RxErrCode))
+				np->stats.rx_frame_errors++;
+			if (desc_status & RxErrCRC)
+				np->stats.rx_crc_errors++;
+		} else {
+			struct sk_buff *skb;
+			/* Reported length should omit the CRC. */
+			u16 pkt_len = data_size - 4;
+
+#ifndef final_version
+			if (np->msg_level & NETIF_MSG_RX_STATUS)
+				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
+					   " of %d, bogus_cnt %d.\n",
+					   pkt_len, data_size, boguscnt);
 #endif
-
-		/* If Rx err or invalid rx_size/rx_status received
-		 * (which happens if we get lost in the ring),
-		 * Rx process gets reset, so we abort any further
-		 * Rx processing.
-		 */
-		if ((rx_size > (MAX_ETH_FRAME_SIZE+4)) ||
-		    (!(rx_status & RxStatusOK))) {
-			netdrv_rx_err (rx_status, dev, tp, ioaddr);
-			return;
-		}
-
-		/* Malloc up new buffer, compatible with net-2e. */
-		/* Omit the four octet CRC from the length. */
-
-		/* TODO: consider allocating skb's outside of
-		 * interrupt context, both to speed interrupt processing,
-		 * and also to reduce the chances of having to
-		 * drop packets here under memory pressure.
-		 */
-
-		skb = dev_alloc_skb (pkt_size + 2);
-		if (skb) {
-			skb->dev = dev;
-			skb_reserve (skb, 2);	/* 16 byte align the IP fields. */
-
-			eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0);
-			skb_put (skb, pkt_size);
-
-			skb->protocol = eth_type_trans (skb, dev);
-			netif_rx (skb);
+			/* Check if the packet is long enough to accept without copying
+			   to a minimally-sized skbuff. */
+			if (pkt_len < np->rx_copybreak
+				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+				skb->dev = dev;
+				skb_reserve(skb, 2);	/* 16 byte align the IP header */
+#if (LINUX_VERSION_CODE >= 0x20100)
+				/* Use combined copy + cksum if available. */
+				eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+				skb_put(skb, pkt_len);
+#else
+				memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
+					   pkt_len);
+#endif
+			} else {
+				char *temp = skb_put(skb = np->rx_skbuff[entry], pkt_len);
+				np->rx_skbuff[entry] = NULL;
+#ifndef final_version				/* Remove after testing. */
+				if (le32desc_to_virt(np->rx_ring[entry].addr) != temp)
+					printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
+						   "do not match in netdev_rx: %p vs. %p / %p.\n",
+						   dev->name,
+						   le32desc_to_virt(np->rx_ring[entry].addr),
+						   skb->head, temp);
+#endif
+			}
+#ifndef final_version				/* Remove after testing. */
+			/* You will want this info for the initial debug. */
+			if (np->msg_level & NETIF_MSG_PKTDATA)
+				printk(KERN_DEBUG "  Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
+					   "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
+					   "%d.%d.%d.%d.\n",
+					   skb->data[0], skb->data[1], skb->data[2], skb->data[3],
+					   skb->data[4], skb->data[5], skb->data[6], skb->data[7],
+					   skb->data[8], skb->data[9], skb->data[10],
+					   skb->data[11], skb->data[12], skb->data[13],
+					   skb->data[14], skb->data[15], skb->data[16],
+					   skb->data[17]);
+#endif
+			skb->protocol = eth_type_trans(skb, dev);
+			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
+			netif_rx(skb);
 			dev->last_rx = jiffies;
-			tp->stats.rx_bytes += pkt_size;
-			tp->stats.rx_packets++;
-		} else {
-			printk (KERN_WARNING
-				"%s: Memory squeeze, dropping packet.\n",
-				dev->name);
-			tp->stats.rx_dropped++;
+			np->stats.rx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+			np->stats.rx_bytes += pkt_len;
+#endif
 		}
-
-		cur_rx = (cur_rx + rx_size + 4 + 3) & ~3;
-		NETDRV_W16_F (RxBufPtr, cur_rx - 16);
+		entry = (++np->cur_rx) % RX_RING_SIZE;
+		np->rx_head_desc = &np->rx_ring[entry];
 	}
 
-	DPRINTK ("%s: Done netdrv_rx(), current %4.4x BufAddr %4.4x,"
-		 " free to %4.4x, Cmd %2.2x.\n", dev->name, cur_rx,
-		 NETDRV_R16 (RxBufAddr),
-		 NETDRV_R16 (RxBufPtr), NETDRV_R8 (ChipCmd));
-
-	tp->cur_rx = cur_rx;
-}
-
-
-static void netdrv_weird_interrupt (struct net_device *dev,
-				     struct netdrv_private *tp,
-				     void *ioaddr,
-				     int status, int link_changed)
-{
-	printk (KERN_DEBUG "%s: Abnormal interrupt, status %8.8x.\n",
-		dev->name, status);
-
-	assert (dev != NULL);
-	assert (tp != NULL);
-	assert (ioaddr != NULL);
-
-	/* Update the error count. */
-	tp->stats.rx_missed_errors += NETDRV_R32 (RxMissed);
-	NETDRV_W32 (RxMissed, 0);
-
-	if ((status & RxUnderrun) && link_changed &&
-	    (tp->drv_flags & HAS_LNK_CHNG)) {
-		/* Really link-change on new chips. */
-		int lpar = NETDRV_R16 (NWayLPAR);
-		int duplex = (lpar & 0x0100) || (lpar & 0x01C0) == 0x0040
-				|| tp->duplex_lock;
-		if (tp->full_duplex != duplex) {
-			tp->full_duplex = duplex;
-			NETDRV_W8 (Cfg9346, Cfg9346_Unlock);
-			NETDRV_W8 (Config1, tp->full_duplex ? 0x60 : 0x20);
-			NETDRV_W8 (Cfg9346, Cfg9346_Lock);
+	/* Refill the Rx ring buffers. */
+	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+		struct sk_buff *skb;
+		entry = np->dirty_rx % RX_RING_SIZE;
+		if (np->rx_skbuff[entry] == NULL) {
+			skb = dev_alloc_skb(np->rx_buf_sz);
+			np->rx_skbuff[entry] = skb;
+			if (skb == NULL)
+				break;				/* Better luck next round. */
+			skb->dev = dev;			/* Mark as being used by this device. */
+			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
+			np->rx_ring[entry].addr = virt_to_le32desc(skb->tail);
 		}
-		status &= ~RxUnderrun;
+		np->rx_ring[entry].length = cpu_to_le32(np->rx_buf_sz);
+		np->rx_ring[entry].status = (entry == RX_RING_SIZE - 1)
+			? cpu_to_le32(DescOwn | DescEndPacket | DescEndRing | DescIntr)
+			: cpu_to_le32(DescOwn | DescEndPacket | DescIntr);
 	}
 
-	/* XXX along with netdrv_rx_err, are we double-counting errors? */
-	if (status &
-	    (RxUnderrun | RxOverflow | RxErr | RxFIFOOver))
-		tp->stats.rx_errors++;
-
-	if (status & (PCSTimeout))
-		tp->stats.rx_length_errors++;
-	if (status & (RxUnderrun | RxFIFOOver))
-		tp->stats.rx_fifo_errors++;
-	if (status & RxOverflow) {
-		tp->stats.rx_over_errors++;
-		tp->cur_rx = NETDRV_R16 (RxBufAddr) % RX_BUF_LEN;
-		NETDRV_W16_F (RxBufPtr, tp->cur_rx - 16);
-	}
-	if (status & PCIErr) {
-		u16 pci_cmd_status;
-		pci_read_config_word (tp->pci_dev, PCI_STATUS, &pci_cmd_status);
-
-		printk (KERN_ERR "%s: PCI Bus error %4.4x.\n",
-			dev->name, pci_cmd_status);
-	}
+	/* Restart Rx engine if stopped. */
+	writel(RxPoll, dev->base_addr + RxCmd);
+	return 0;
 }
 
-
-/* The interrupt handler does all of the Rx thread work and cleans up
-   after the Tx thread. */
-static void netdrv_interrupt (int irq, void *dev_instance,
-			       struct pt_regs *regs)
+static void netdev_error(struct net_device *dev, int intr_status)
 {
-	struct net_device *dev = (struct net_device *) dev_instance;
-	struct netdrv_private *tp = dev->priv;
-	int boguscnt = max_interrupt_work;
-	void *ioaddr = tp->mmio_addr;
-	int status = 0, link_changed = 0; /* avoid bogus "uninit" warning */
-
-	spin_lock (&tp->lock);
-
-	do {
-		status = NETDRV_R16 (IntrStatus);
-
-		/* h/w no longer present (hotplug?) or major error, bail */
-		if (status == 0xFFFF)
-			break;
-
-		/* Acknowledge all of the current interrupt sources ASAP */
-		NETDRV_W16_F (IntrStatus, status);
-
-		DPRINTK ("%s: interrupt  status=%#4.4x new intstat=%#4.4x.\n",
-				dev->name, status,
-				NETDRV_R16 (IntrStatus));
-
-		if ((status &
-		     (PCIErr | PCSTimeout | RxUnderrun | RxOverflow |
-		      RxFIFOOver | TxErr | TxOK | RxErr | RxOK)) == 0)
-			break;
-
-		/* Check uncommon events with one test. */
-		if (status & (PCIErr | PCSTimeout | RxUnderrun | RxOverflow |
-		  	      RxFIFOOver | TxErr | RxErr))
-			netdrv_weird_interrupt (dev, tp, ioaddr,
-						 status, link_changed);
-
-		if (status & (RxOK | RxUnderrun | RxOverflow | RxFIFOOver))	/* Rx interrupt */
-			netdrv_rx_interrupt (dev, tp, ioaddr);
-
-		if (status & (TxOK | TxErr))
-			netdrv_tx_interrupt (dev, tp, ioaddr);
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
 
-		boguscnt--;
-	} while (boguscnt > 0);
-
-	if (boguscnt <= 0) {
-		printk (KERN_WARNING
-			"%s: Too much work at interrupt, "
-			"IntrStatus=0x%4.4x.\n", dev->name,
-			status);
-
-		/* Clear all interrupt sources. */
-		NETDRV_W16 (IntrStatus, 0xffff);
+	if (intr_status & LinkChange) {
+		int phy_num = np->phys[0];
+		if (np->msg_level & NETIF_MSG_LINK)
+			printk(KERN_NOTICE "%s: Link changed: Autonegotiation advertising"
+				   " %4.4x  partner %4.4x.\n", dev->name,
+				   mdio_read(dev, phy_num, 4),
+				   mdio_read(dev, phy_num, 5));
+		/* Clear sticky bit. */
+		mdio_read(dev, phy_num, 1);
+		/* If link beat has returned... */
+		if (mdio_read(dev, phy_num, 1) & 0x0004)
+			netif_link_up(dev);
+		else
+			netif_link_down(dev);
+		check_duplex(dev);
 	}
-
-	spin_unlock (&tp->lock);
-
-	DPRINTK ("%s: exiting interrupt, intr_status=%#4.4x.\n",
-		 dev->name, NETDRV_R16 (IntrStatus));
+	if ((intr_status & TxUnderrun)
+		&& (np->tx_config & TxThresholdField) != TxThresholdField) {
+		long ioaddr = dev->base_addr;
+		np->tx_config += TxThresholdInc;
+		writel(np->tx_config, ioaddr + TxMode);
+		np->stats.tx_fifo_errors++;
+	}
+	if (intr_status & RxOverflow) {
+		printk(KERN_WARNING "%s: Receiver overflow.\n", dev->name);
+		np->stats.rx_over_errors++;
+		netdev_rx(dev);			/* Refill */
+		get_stats(dev);			/* Empty dropped counter. */
+	}
+	if (intr_status & StatsMax) {
+		get_stats(dev);
+	}
+	if ((intr_status & ~(LinkChange|StatsMax|TxUnderrun|RxOverflow))
+		&& (np->msg_level & NETIF_MSG_DRV))
+		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
+			   dev->name, intr_status);
+	/* Hmmmmm, it's not clear how to recover from PCI faults. */
+	if (intr_status & IntrTxPCIErr)
+		np->stats.tx_fifo_errors++;
+	if (intr_status & IntrRxPCIErr)
+		np->stats.rx_fifo_errors++;
+}
+
+/* We frequently do not bother to spinlock statistics.
+   A race window exists only if the adds are non-atomic; the error counts
+   are typically zero, and statistics are non-critical. */
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+	long ioaddr = dev->base_addr;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+
+	/* We should lock this segment of code for SMP eventually, although
+	   the vulnerability window is very small and statistics are
+	   non-critical. */
+	/* The chip need only report frames that were silently dropped. */
+	np->stats.rx_crc_errors	+= readl(ioaddr + RxCRCErrs);
+	np->stats.rx_missed_errors	+= readl(ioaddr + RxMissed);
+
+	return &np->stats;
+}
+
+/* The little-endian AUTODIN II ethernet CRC calculations.
+   A big-endian version is also available.
+   This is slow but compact code.  Do not use this routine for bulk data,
+   use a table-based routine instead.
+   This is common code and should be moved to net/core/crc.c.
+   Chips may use the upper or lower CRC bits, and may reverse and/or invert
+   them.  Select the endian-ness that results in minimal calculations.
+*/
+static unsigned const ethernet_polynomial_le = 0xedb88320U;
+static inline unsigned ether_crc_le(int length, unsigned char *data)
+{
+	unsigned int crc = ~0;	/* Initial value. */
+	while(--length >= 0) {
+		unsigned char current_octet = *data++;
+		int bit;
+		for (bit = 8; --bit >= 0; current_octet >>= 1) {
+			if ((crc ^ current_octet) & 1) {
+				crc >>= 1;
+				crc ^= ethernet_polynomial_le;
+			} else
+				crc >>= 1;
+		}
+	}
+	return crc;
 }
 
-
-static int netdrv_close (struct net_device *dev)
+static void set_rx_mode(struct net_device *dev)
 {
-	struct netdrv_private *tp = dev->priv;
-	void *ioaddr = tp->mmio_addr;
-	unsigned long flags;
-
-	DPRINTK ("ENTER\n");
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	u32 mc_filter[2];			/* Multicast hash filter */
+	u32 rx_mode;
 
-	netif_stop_queue (dev);
-
-	DPRINTK ("%s: Shutting down ethercard, status was 0x%4.4x.\n",
-			dev->name, NETDRV_R16 (IntrStatus));
-
-	del_timer_sync (&tp->timer);
-
-	spin_lock_irqsave (&tp->lock, flags);
-
-	/* Stop the chip's Tx and Rx DMA processes. */
-	NETDRV_W8 (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear));
-
-	/* Disable interrupts by clearing the interrupt mask. */
-	NETDRV_W16 (IntrMask, 0x0000);
-
-	/* Update the error counts. */
-	tp->stats.rx_missed_errors += NETDRV_R32 (RxMissed);
-	NETDRV_W32 (RxMissed, 0);
-
-	spin_unlock_irqrestore (&tp->lock, flags);
-
-	synchronize_irq ();
-	free_irq (dev->irq, dev);
-
-	netdrv_tx_clear (tp);
-
-	pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
-			    tp->rx_ring, tp->rx_ring_dma);
-	pci_free_consistent(tp->pci_dev, TX_BUF_TOT_LEN,
-			    tp->tx_bufs, tp->tx_bufs_dma);
-	tp->rx_ring = NULL;
-	tp->tx_bufs = NULL;
-
-	/* Green! Put the chip in low-power mode. */
-	NETDRV_W8 (Cfg9346, Cfg9346_Unlock);
-	NETDRV_W8 (Config1, 0x03);
-	NETDRV_W8 (Cfg9346, Cfg9346_Lock);
-
-	DPRINTK ("EXIT\n");
-	return 0;
+	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
+		/* Unconditionally log net taps. */
+		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+		memset(mc_filter, ~0, sizeof(mc_filter));
+		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAllPhys
+			| AcceptMyPhys;
+	} else if ((dev->mc_count > np->multicast_filter_limit)
+			   ||  (dev->flags & IFF_ALLMULTI)) {
+		/* Too many to match, or accept all multicasts. */
+		memset(mc_filter, 0xff, sizeof(mc_filter));
+		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+	} else {
+		struct dev_mc_list *mclist;
+		int i;
+		memset(mc_filter, 0, sizeof(mc_filter));
+		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+			 i++, mclist = mclist->next) {
+			set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f,
+					mc_filter);
+		}
+		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+	}
+	writel(mc_filter[0], ioaddr + MulticastFilter0);
+	writel(mc_filter[1], ioaddr + MulticastFilter1);
+	writel(rx_mode, ioaddr + RxMode);
 }
 
-
-static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
+/*
+  Handle user-level ioctl() calls.
+  We must use two numeric constants as the key because some clueless person
+  changed the value for the symbolic name.
+*/
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
-	struct netdrv_private *tp = dev->priv;
-	struct mii_ioctl_data *data = (struct mii_ioctl_data *) & rq->ifr_data;
-	unsigned long flags;
-	int rc = 0;
-
-	DPRINTK ("ENTER\n");
-
-	switch (cmd) {
-	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
-	case SIOCDEVPRIVATE:		/* for binary compat, remove in 2.5 */
-		data->phy_id = tp->phys[0] & 0x3f;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	u16 *data = (u16 *)&rq->ifr_data;
+	u32 *data32 = (void *)&rq->ifr_data;
+
+	switch(cmd) {
+	case 0x8947: case 0x89F0:
+		/* SIOCGMIIPHY: Get the address of the PHY in use. */
+		data[0] = np->phys[0] & 0x1f;
 		/* Fall Through */
-
-	case SIOCGMIIREG:		/* Read MII PHY register. */
-	case SIOCDEVPRIVATE+1:		/* for binary compat, remove in 2.5 */
-		spin_lock_irqsave (&tp->lock, flags);
-		data->val_out = mdio_read (dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
-		spin_unlock_irqrestore (&tp->lock, flags);
-		break;
-
-	case SIOCSMIIREG:		/* Write MII PHY register. */
-	case SIOCDEVPRIVATE+2:		/* for binary compat, remove in 2.5 */
-		if (!capable (CAP_NET_ADMIN)) {
-			rc = -EPERM;
-			break;
+	case 0x8948: case 0x89F1:
+		/* SIOCGMIIREG: Read the specified MII register. */
+		data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
+		return 0;
+	case 0x8949: case 0x89F2:
+		/* SIOCSMIIREG: Write the specified MII register */
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (data[0] == np->phys[0]) {
+			u16 value = data[2];
+			switch (data[1]) {
+			case 0:
+				/* Check for autonegotiation on or reset. */
+				np->medialock = (value & 0x9000) ? 0 : 1;
+				if (np->medialock)
+					np->full_duplex = (value & 0x0100) ? 1 : 0;
+				break;
+			case 4: np->advertising = value; break;
+			}
+			/* Perhaps check_duplex(dev), depending on chip semantics. */
 		}
-
-		spin_lock_irqsave (&tp->lock, flags);
-		mdio_write (dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
-		spin_unlock_irqrestore (&tp->lock, flags);
-		break;
-
+		mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+		return 0;
+	case SIOCGPARAMS:
+		data32[0] = np->msg_level;
+		data32[1] = np->multicast_filter_limit;
+		data32[2] = np->max_interrupt_work;
+		data32[3] = np->rx_copybreak;
+		return 0;
+	case SIOCSPARAMS:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		np->msg_level = data32[0];
+		np->multicast_filter_limit = data32[1];
+		np->max_interrupt_work = data32[2];
+		np->rx_copybreak = data32[3];
+		return 0;
 	default:
-		rc = -EOPNOTSUPP;
-		break;
+		return -EOPNOTSUPP;
 	}
-
-	DPRINTK ("EXIT, returning %d\n", rc);
-	return rc;
 }
 
-
-static struct net_device_stats *netdrv_get_stats (struct net_device *dev)
+static int netdev_close(struct net_device *dev)
 {
-	struct netdrv_private *tp = dev->priv;
-	void *ioaddr = tp->mmio_addr;
-
-	DPRINTK ("ENTER\n");
-
-	assert (tp != NULL);
-
-	if (netif_running(dev)) {
-		unsigned long flags;
-
-		spin_lock_irqsave (&tp->lock, flags);
+	long ioaddr = dev->base_addr;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	int i;
 
-		tp->stats.rx_missed_errors += NETDRV_R32 (RxMissed);
-		NETDRV_W32 (RxMissed, 0);
+	netif_stop_tx_queue(dev);
 
-		spin_unlock_irqrestore (&tp->lock, flags);
+	if (np->msg_level & NETIF_MSG_IFDOWN) {
+		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x "
+			   "Rx %4.4x Int %2.2x.\n",
+			   dev->name, (int)readl(ioaddr + TxStatus),
+			   (int)readl(ioaddr + RxStatus), (int)readl(ioaddr + IntrStatus));
+		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
+			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
 	}
 
-	DPRINTK ("EXIT\n");
-	return &tp->stats;
-}
-
-/* Set or clear the multicast filter for this adaptor.
-   This routine is not state sensitive and need not be SMP locked. */
-
-static void netdrv_set_rx_mode (struct net_device *dev)
-{
-	struct netdrv_private *tp = dev->priv;
-	void *ioaddr = tp->mmio_addr;
-	u32 mc_filter[2];	/* Multicast hash filter */
-	int i, rx_mode;
-	u32 tmp;
+	/* Disable interrupts by clearing the interrupt mask. */
+	writel(0x0000, ioaddr + IntrEnable);
 
-	DPRINTK ("ENTER\n");
+	/* Stop the chip's Tx and Rx processes. */
+	writel(RxDisable, ioaddr + RxCmd);
+	writew(TxDisable, ioaddr + TxCmd);
+
+	del_timer(&np->timer);
+
+#ifdef __i386__
+	if (np->msg_level & NETIF_MSG_IFDOWN) {
+		printk("\n"KERN_DEBUG"  Tx ring at %8.8x:\n",
+			   (int)virt_to_bus(np->tx_ring));
+		for (i = 0; i < TX_RING_SIZE; i++)
+			printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
+				   i, np->tx_ring[i].length,
+				   np->tx_ring[i].status, np->tx_ring[i].addr);
+		printk(KERN_DEBUG  "\n" KERN_DEBUG "  Rx ring %8.8x:\n",
+			   (int)virt_to_bus(np->rx_ring));
+		for (i = 0; i < RX_RING_SIZE; i++) {
+			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
+				   i, np->rx_ring[i].length,
+				   np->rx_ring[i].status, np->rx_ring[i].addr);
+		}
+	}
+#endif /* __i386__ debugging only */
 
-	DPRINTK ("%s:   netdrv_set_rx_mode(%4.4x) done -- Rx config %8.8x.\n",
-			dev->name, dev->flags, NETDRV_R32 (RxConfig));
+	free_irq(dev->irq, dev);
 
-	/* Note: do not reorder, GCC is clever about common statements. */
-	if (dev->flags & IFF_PROMISC) {
-		/* Unconditionally log net taps. */
-		printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n",
-			dev->name);
-		rx_mode =
-		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
-		    AcceptAllPhys;
-		mc_filter[1] = mc_filter[0] = 0xffffffff;
-	} else if ((dev->mc_count > multicast_filter_limit)
-		   || (dev->flags & IFF_ALLMULTI)) {
-		/* Too many to filter perfectly -- accept all multicasts. */
-		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
-		mc_filter[1] = mc_filter[0] = 0xffffffff;
-	} else {
-		struct dev_mc_list *mclist;
-		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
-		mc_filter[1] = mc_filter[0] = 0;
-		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
-		     i++, mclist = mclist->next)
-			set_bit (ether_crc (ETH_ALEN, mclist->dmi_addr) >> 26,
-				 mc_filter);
+	/* Free all the skbuffs in the Rx queue. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		np->rx_ring[i].status = 0;
+		np->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
+		if (np->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+			np->rx_skbuff[i]->free = 1;
+#endif
+			dev_free_skb(np->rx_skbuff[i]);
+		}
+		np->rx_skbuff[i] = 0;
+	}
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		if (np->tx_skbuff[i])
+			dev_free_skb(np->tx_skbuff[i]);
+		np->tx_skbuff[i] = 0;
 	}
 
-	/* if called from irq handler, lock already acquired */
-	if (!in_irq ())
-		spin_lock_irq (&tp->lock);
-
-	/* We can safely update without stopping the chip. */
-	tmp = netdrv_rx_config | rx_mode |
-		(NETDRV_R32 (RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
-	NETDRV_W32_F (RxConfig, tmp);
-	NETDRV_W32_F (MAR0 + 0, mc_filter[0]);
-	NETDRV_W32_F (MAR0 + 4, mc_filter[1]);
+	writel(0x00, ioaddr + LEDCtrl);
 
-	if (!in_irq ())
-		spin_unlock_irq (&tp->lock);
+	MOD_DEC_USE_COUNT;
 
-	DPRINTK ("EXIT\n");
+	return 0;
 }
 
-
-#ifdef CONFIG_PM
-
-static int netdrv_suspend (struct pci_dev *pdev, u32 state)
+static int netdev_pwr_event(void *dev_instance, int event)
 {
-	struct net_device *dev = pci_get_drvdata (pdev);
-	struct netdrv_private *tp = dev->priv;
-	void *ioaddr = tp->mmio_addr;
-	unsigned long flags;
-
-	if (!netif_running(dev))
-		return;
-	netif_device_detach (dev);
-
-	spin_lock_irqsave (&tp->lock, flags);
-
-	/* Disable interrupts, stop Tx and Rx. */
-	NETDRV_W16 (IntrMask, 0x0000);
-	NETDRV_W8 (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear));
-
-	/* Update the error counts. */
-	tp->stats.rx_missed_errors += NETDRV_R32 (RxMissed);
-	NETDRV_W32 (RxMissed, 0);
-
-	spin_unlock_irqrestore (&tp->lock, flags);
-
-	pci_power_off (pdev, -1);
+	struct net_device *dev = dev_instance;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+
+	if (np->msg_level & NETIF_MSG_LINK)
+		printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
+	switch(event) {
+	case DRV_ATTACH:
+		MOD_INC_USE_COUNT;
+		break;
+	case DRV_SUSPEND:
+		/* Disable interrupts, stop Tx and Rx. */
+		writel(0x0000, ioaddr + IntrEnable);
+		writel(RxDisable, ioaddr + RxCmd);
+		writew(TxDisable, ioaddr + TxCmd);
+		break;
+	case DRV_RESUME:
+		/* This is incomplete: the actions are very chip specific. */
+		set_rx_mode(dev);
+		writel(np->intr_enable, ioaddr + IntrEnable);
+		break;
+	case DRV_DETACH: {
+		struct net_device **devp, **next;
+		if (dev->flags & IFF_UP) {
+			/* Some, but not all, kernel versions close automatically. */
+			dev_close(dev);
+			dev->flags &= ~(IFF_UP|IFF_RUNNING);
+		}
+		unregister_netdev(dev);
+		release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
+#ifndef USE_IO_OPS
+		iounmap((char *)dev->base_addr);
+#endif
+		for (devp = &root_net_dev; *devp; devp = next) {
+			next = &((struct netdev_private *)(*devp)->priv)->next_module;
+			if (*devp == dev) {
+				*devp = *next;
+				break;
+			}
+		}
+		if (np->priv_addr)
+			kfree(np->priv_addr);
+		kfree(dev);
+		MOD_DEC_USE_COUNT;
+		break;
+	}
+	}
 
 	return 0;
 }
 
-
-static int netdrv_resume (struct pci_dev *pdev)
+
+#ifdef MODULE
+int init_module(void)
 {
-	struct net_device *dev = pci_get_drvdata (pdev);
-
-	if (!netif_running(dev))
-		return;
-	pci_power_on (pdev);
-	netif_device_attach (dev);
-	netdrv_hw_start (dev);
-
-	return 0;
+	if (debug >= NETIF_MSG_DRV)	/* Emit version even if no cards detected. */
+		printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+	return pci_drv_register(&netfin_drv_id, NULL);
 }
 
-#endif /* CONFIG_PM */
-
-
-static struct pci_driver netdrv_pci_driver = {
-	name:		MODNAME,
-	id_table:	netdrv_pci_tbl,
-	probe:		netdrv_init_one,
-	remove:		__devexit_p(netdrv_remove_one),
-#ifdef CONFIG_PM
-	suspend:	netdrv_suspend,
-	resume:		netdrv_resume,
-#endif /* CONFIG_PM */
-};
-
-
-static int __init netdrv_init_module (void)
+void cleanup_module(void)
 {
-/* when a module, this is printed whether or not devices are found in probe */
-#ifdef MODULE
-	printk(version);
-#endif
-	return pci_module_init (&netdrv_pci_driver);
-}
+	struct net_device *next_dev;
 
+	pci_drv_unregister(&netfin_drv_id);
 
-static void __exit netdrv_cleanup_module (void)
-{
-	pci_unregister_driver (&netdrv_pci_driver);
+	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+	while (root_net_dev) {
+		struct netdev_private *np = (void *)(root_net_dev->priv);
+		unregister_netdev(root_net_dev);
+#ifdef USE_IO_OPS
+		release_region(root_net_dev->base_addr,
+					   pci_id_tbl[np->chip_id].io_size);
+#else
+		iounmap((char *)(root_net_dev->base_addr));
+#endif
+		next_dev = np->next_module;
+		if (np->priv_addr)
+			kfree(np->priv_addr);
+		kfree(root_net_dev);
+		root_net_dev = next_dev;
+	}
 }
 
-
-module_init(netdrv_init_module);
-module_exit(netdrv_cleanup_module);
+#endif  /* MODULE */
+
+/*
+ * Local variables:
+ *  compile-command: "make KERNVER=`uname -r` pci-skeleton.o"
+ *  compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c pci-skeleton.c"
+ *  simple-compile-command: "gcc -DMODULE -O6 -c pci-skeleton.c"
+ *  c-indent-level: 4
+ *  c-basic-offset: 4
+ *  tab-width: 4
+ * End:
+ */
diff -uNr net/drivers/net/rtl8139.c linux-2.4.20/drivers/net/rtl8139.c
--- net/drivers/net/rtl8139.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.4.20/drivers/net/rtl8139.c	2003-01-14 20:29:35.000000000 -0500
@@ -0,0 +1,1731 @@
+/* rtl8139.c: A RealTek RTL8129/8139 Fast Ethernet driver for Linux. */
+/*
+	Written 1997-2002 by Donald Becker.
+	This software may be used and distributed according to the terms of
+	the GNU General Public License (GPL), incorporated herein by reference.
+	Drivers based on or derived from this code fall under the GPL and must
+	retain the authorship, copyright and license notice.  This file is not
+	a complete program and may only be used when the entire operating
+	system is licensed under the GPL.
+
+	This driver is for boards based on the RTL8129 and RTL8139 PCI ethernet
+	chips.
+
+	The author may be reached as becker@scyld.com, or C/O
+	Scyld Computing Corporation
+	410 Severn Ave., Suite 210
+	Annapolis MD 21403
+
+	Support and updates available at
+	http://www.scyld.com/network/rtl8139.html
+
+	Twister-tuning table provided by Kinston <shangh@realtek.com.tw>.
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char versionA[] =
+"rtl8139.c:v1.22 11/17/2002 Donald Becker, becker@scyld.com.\n";
+static const char versionB[] =
+" http://www.scyld.com/network/rtl8139.html\n";
+
+#ifndef USE_MEM_OPS
+/* Note: Register access width and timing restrictions apply in MMIO mode.
+   This updated driver should nominally work, but I/O mode is better tested. */
+#define USE_IO_OPS
+#endif
+
+/* The user-configurable values.
+   These may be modified when a driver module is loaded.*/
+/* Message enable level: 0..31 = no..all messages.  See NETIF_MSG docs. */
+static int debug = 2;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+   The RTL chips use a 64 element hash table based on the Ethernet CRC.  It
+   is efficient to update the hardware filter, but recalculating the table
+   for a long filter list is painful.  */
+static int multicast_filter_limit = 32;
+
+/* Used to pass the full-duplex flag, etc. */
+#define MAX_UNITS 8		/* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Operational parameters that are set at compile time. */
+
+/* Maximum size of the in-memory receive ring (smaller if no memory). */
+#define RX_BUF_LEN_IDX	2			/* 0==8K, 1==16K, 2==32K, 3==64K */
+/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
+#define TX_BUF_SIZE	1536
+
+/* PCI Tuning Parameters
+   Threshold is bytes transferred to chip before transmission starts. */
+#define TX_FIFO_THRESH 256	/* In bytes, rounded down to 32 byte units. */
+
+/* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024. */
+#define RX_FIFO_THRESH	4		/* Rx buffer level before first PCI xfer.  */
+#define RX_DMA_BURST	4		/* Maximum PCI burst, '4' is 256 bytes */
+#define TX_DMA_BURST	4		/* Calculate as 16<<val. */
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT  (6*HZ)
+
+/* Allocation size of Rx buffers with full-sized Ethernet frames.
+   This is a cross-driver value that is not a limit,
+   but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ		1536
+
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning  You must compile this file with the correct options!
+#warning  See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/processor.h>		/* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#if LINUX_VERSION_CODE >= 0x20300
+#include <linux/spinlock.h>
+#elif LINUX_VERSION_CODE >= 0x20200
+#include <asm/spinlock.h>
+#endif
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+#if (LINUX_VERSION_CODE >= 0x20100)  &&  defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+/*
+				Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the RealTek RTL8129 series, the RealTek
+Fast Ethernet controllers for PCI and CardBus.  This chip is used on many
+low-end boards, sometimes with custom chip labels.
+
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board.  The system BIOS will assign the
+PCI INTA signal to a (preferably otherwise unused) system IRQ line.
+Note: Kernel versions earlier than 1.3.73 do not support shared PCI
+interrupt lines.
+
+III. Driver operation
+
+IIIa. Rx Ring buffers
+
+The receive unit uses a single linear ring buffer rather than the more
+common (and more efficient) descriptor-based architecture.  Incoming frames
+are sequentially stored into the Rx region, and the host copies them into
+skbuffs.
+
+Comment: While it is theoretically possible to process many frames in place,
+any delay in Rx processing would block the Rx ring and cause us to drop
+frames.  It would be difficult to design a protocol stack where the data
+buffer could be recalled by the device driver.
+
+IIIb. Tx operation
+
+The RTL8129 uses a fixed set of four Tx descriptors in register space.  Tx
+frames must be 32 bit aligned.  Linux aligns the IP header on word
+boundaries, and 14 byte ethernet header means that almost all frames will
+need to be copied to an alignment buffer.  The driver statically allocates
+alignment the four alignment buffers at open() time.
+
+IVb. References
+
+http://www.realtek.com.tw/cn/cn.html
+http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
+
+IVc. Errata
+
+*/
+
+
+static void *rtl8139_probe1(struct pci_dev *pdev, void *init_dev,
+							long ioaddr, int irq, int chip_idx, int find_cnt);
+static int rtl_pwr_event(void *dev_instance, int event);
+
+enum chip_capability_flags {HAS_MII_XCVR=0x01, HAS_CHIP_XCVR=0x02,
+							HAS_LNK_CHNG=0x04, HAS_DESC=0x08};
+#ifdef USE_IO_OPS
+#define RTL8139_IOTYPE  PCI_USES_MASTER|PCI_USES_IO |PCI_ADDR0
+#else
+#define RTL8139_IOTYPE  PCI_USES_MASTER|PCI_USES_MEM|PCI_ADDR1
+#endif
+#define RTL8129_CAPS  HAS_MII_XCVR
+#define RTL8139_CAPS  HAS_CHIP_XCVR|HAS_LNK_CHNG
+#define RTL8139D_CAPS  HAS_CHIP_XCVR|HAS_LNK_CHNG|HAS_DESC
+
+/* Note: Update the marked constant in _attach() if the RTL8139B entry moves.*/
+static struct pci_id_info pci_tbl[] = {
+	{"RealTek RTL8139C+, 64 bit high performance",
+	 { 0x813910ec, 0xffffffff, 0,0, 0x20, 0xff},
+	 RTL8139_IOTYPE, 0x80, RTL8139D_CAPS, },
+	{"RealTek RTL8139C Fast Ethernet",
+	 { 0x813910ec, 0xffffffff, 0,0, 0x10, 0xff},
+	 RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+	{"RealTek RTL8129 Fast Ethernet", { 0x812910ec, 0xffffffff,},
+	 RTL8139_IOTYPE, 0x80, RTL8129_CAPS, },
+	{"RealTek RTL8139 Fast Ethernet", { 0x813910ec, 0xffffffff,},
+	 RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+	{"RealTek RTL8139B PCI/CardBus",  { 0x813810ec, 0xffffffff,},
+	 RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+	{"SMC1211TX EZCard 10/100 (RealTek RTL8139)", { 0x12111113, 0xffffffff,},
+	 RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+	{"Accton MPX5030 (RealTek RTL8139)", { 0x12111113, 0xffffffff,},
+	 RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+	{"D-Link DFE-538TX (RealTek RTL8139)", { 0x13001186, 0xffffffff,},
+	 RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+	{"LevelOne FPC-0106Tx (RealTek RTL8139)", { 0x0106018a, 0xffffffff,},
+	 RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+	{"Compaq HNE-300 (RealTek RTL8139c)", { 0x8139021b, 0xffffffff,},
+	 RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+	{"Edimax EP-4103DL CardBus (RealTek RTL8139c)", { 0xab0613d1, 0xffffffff,},
+	 RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+	{0,},						/* 0 terminated list. */
+};
+
+struct drv_id_info rtl8139_drv_id = {
+	"realtek", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_tbl,
+	rtl8139_probe1, rtl_pwr_event };
+
+#ifndef USE_IO_OPS
+#undef inb
+#undef inw
+#undef inl
+#undef outb
+#undef outw
+#undef outl
+#define inb readb
+#define inw readw
+#define inl readl
+#define outb writeb
+#define outw writew
+#define outl writel
+#endif
+
+/* The rest of these values should never change. */
+#define NUM_TX_DESC	4			/* Number of Tx descriptor registers. */
+
+/* Symbolic offsets to registers. */
+enum RTL8129_registers {
+	MAC0=0,						/* Ethernet hardware address. */
+	MAR0=8,						/* Multicast filter. */
+	TxStatus0=0x10,				/* Transmit status (Four 32bit registers). */
+	TxAddr0=0x20,				/* Tx descriptors (also four 32bit). */
+	RxBuf=0x30, RxEarlyCnt=0x34, RxEarlyStatus=0x36,
+	ChipCmd=0x37, RxBufPtr=0x38, RxBufAddr=0x3A,
+	IntrMask=0x3C, IntrStatus=0x3E,
+	TxConfig=0x40, RxConfig=0x44,
+	Timer=0x48,					/* A general-purpose counter. */
+	RxMissed=0x4C,				/* 24 bits valid, write clears. */
+	Cfg9346=0x50, Config0=0x51, Config1=0x52,
+	FlashReg=0x54, GPPinData=0x58, GPPinDir=0x59, MII_SMI=0x5A, HltClk=0x5B,
+	MultiIntr=0x5C, TxSummary=0x60,
+	MII_BMCR=0x62, MII_BMSR=0x64, NWayAdvert=0x66, NWayLPAR=0x68,
+	NWayExpansion=0x6A,
+	/* Undocumented registers, but required for proper operation. */
+	FIFOTMS=0x70,	/* FIFO Control and test. */
+	CSCR=0x74,	/* Chip Status and Configuration Register. */
+	PARA78=0x78, PARA7c=0x7c,	/* Magic transceiver parameter register. */
+};
+
+enum ChipCmdBits {
+	CmdReset=0x10, CmdRxEnb=0x08, CmdTxEnb=0x04, RxBufEmpty=0x01, };
+
+/* Interrupt register bits, using my own meaningful names. */
+enum IntrStatusBits {
+	PCIErr=0x8000, PCSTimeout=0x4000,
+	RxFIFOOver=0x40, RxUnderrun=0x20, RxOverflow=0x10,
+	TxErr=0x08, TxOK=0x04, RxErr=0x02, RxOK=0x01,
+};
+enum TxStatusBits {
+	TxHostOwns=0x2000, TxUnderrun=0x4000, TxStatOK=0x8000,
+	TxOutOfWindow=0x20000000, TxAborted=0x40000000, TxCarrierLost=0x80000000,
+};
+enum RxStatusBits {
+	RxMulticast=0x8000, RxPhysical=0x4000, RxBroadcast=0x2000,
+	RxBadSymbol=0x0020, RxRunt=0x0010, RxTooLong=0x0008, RxCRCErr=0x0004,
+	RxBadAlign=0x0002, RxStatusOK=0x0001,
+};
+
+/* Twister tuning parameters from RealTek.
+   Completely undocumented, but required to tune bad links. */
+enum CSCRBits {
+	CSCR_LinkOKBit=0x0400, CSCR_LinkChangeBit=0x0800,
+	CSCR_LinkStatusBits=0x0f000, CSCR_LinkDownOffCmd=0x003c0,
+	CSCR_LinkDownCmd=0x0f3c0,
+};
+#define PARA78_default	0x78fa8388
+#define PARA7c_default	0xcb38de43 			/* param[0][3] */
+#define PARA7c_xxx		0xcb38de43
+unsigned long param[4][4]={
+	{0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
+	{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
+	{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
+	{0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
+};
+
+#define PRIV_ALIGN	15 	/* Desired alignment mask */
+struct rtl8129_private {
+	struct net_device *next_module;
+	void *priv_addr;					/* Unaligned address for kfree */
+
+	int chip_id, drv_flags;
+	struct pci_dev *pci_dev;
+	struct net_device_stats stats;
+	struct timer_list timer;	/* Media selection timer. */
+	int msg_level;
+	int max_interrupt_work;
+
+	/* Receive state. */
+	unsigned char *rx_ring;
+	unsigned int cur_rx;		/* Index into the Rx buffer of next Rx pkt. */
+	unsigned int rx_buf_len;	/* Size (8K 16K 32K or 64KB) of the Rx ring */
+
+	/* Transmit state. */
+	unsigned int cur_tx, dirty_tx, tx_flag;
+	unsigned long tx_full;				/* The Tx queue is full. */
+	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
+	struct sk_buff* tx_skbuff[NUM_TX_DESC];
+	unsigned char *tx_buf[NUM_TX_DESC];	/* Tx bounce buffers */
+	unsigned char *tx_bufs;				/* Tx bounce buffer region. */
+
+	/* Receive filter state. */
+	unsigned int rx_config;
+	u32 mc_filter[2];		 /* Multicast hash filter */
+	int cur_rx_mode;
+	int multicast_filter_limit;
+
+	/* Transceiver state. */
+	char phys[4];						/* MII device addresses. */
+	u16 advertising;					/* NWay media advertisement */
+	char twistie, twist_row, twist_col;	/* Twister tune state. */
+	u8	config1;
+	unsigned int full_duplex:1;			/* Full-duplex operation requested. */
+	unsigned int duplex_lock:1;
+	unsigned int default_port:4;		/* Last dev->if_port value. */
+	unsigned int media2:4;				/* Secondary monitored media port. */
+	unsigned int medialock:1;			/* Don't sense media type. */
+	unsigned int mediasense:1;			/* Media sensing in progress. */
+};
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("RealTek RTL8129/8139 Fast Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(debug, "i");
+MODULE_PARM_DESC(debug, "Driver message level (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(full_duplex, "Non-zero to set forced full duplex.");
+MODULE_PARM_DESC(multicast_filter_limit,
+				 "Multicast addresses before switching to Rx-all-multicast");
+MODULE_PARM_DESC(max_interrupt_work,
+				 "Driver maximum events handled per interrupt");
+
+static int rtl8129_open(struct net_device *dev);
+static void rtl_hw_start(struct net_device *dev);
+static int read_eeprom(long ioaddr, int location, int addr_len);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int val);
+static void rtl8129_timer(unsigned long data);
+static void rtl8129_tx_timeout(struct net_device *dev);
+static void rtl8129_init_ring(struct net_device *dev);
+static int rtl8129_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int rtl8129_rx(struct net_device *dev);
+static void rtl8129_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static void rtl_error(struct net_device *dev, int status, int link_status);
+static int rtl8129_close(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static struct net_device_stats *rtl8129_get_stats(struct net_device *dev);
+static inline u32 ether_crc(int length, unsigned char *data);
+static void set_rx_mode(struct net_device *dev);
+
+
+/* A list of all installed RTL8129 devices, for removing the driver module. */
+static struct net_device *root_rtl8129_dev = NULL;
+
+#ifndef MODULE
+int rtl8139_probe(struct net_device *dev)
+{
+	static int did_version = 0;			/* Already printed version info. */
+
+	if (debug >= NETIF_MSG_DRV	/* Emit version even if no cards detected. */
+		&&  did_version++ == 0)
+		printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
+	return pci_drv_register(&rtl8139_drv_id, dev);
+}
+#endif
+
+static void *rtl8139_probe1(struct pci_dev *pdev, void *init_dev,
+							long ioaddr, int irq, int chip_idx, int found_cnt)
+{
+	struct net_device *dev;
+	struct rtl8129_private *np;
+	void *priv_mem;
+	int i, option = found_cnt < MAX_UNITS ? options[found_cnt] : 0;
+	int config1;
+
+	dev = init_etherdev(init_dev, 0);
+	if (!dev)
+		return NULL;
+
+	printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ",
+		   dev->name, pci_tbl[chip_idx].name, ioaddr, irq);
+
+	/* Bring the chip out of low-power mode. */
+	config1 = inb(ioaddr + Config1);
+	if (pci_tbl[chip_idx].drv_flags & HAS_MII_XCVR)			/* rtl8129 chip */
+		outb(config1 & ~0x03, ioaddr + Config1);
+
+	{
+		int addr_len = read_eeprom(ioaddr, 0, 8) == 0x8129 ? 8 : 6;
+		for (i = 0; i < 3; i++)
+			((u16 *)(dev->dev_addr))[i] =
+				le16_to_cpu(read_eeprom(ioaddr, i+7, addr_len));
+	}
+
+	for (i = 0; i < 5; i++)
+		printk("%2.2x:", dev->dev_addr[i]);
+	printk("%2.2x.\n", dev->dev_addr[i]);
+
+	/* Make certain elements e.g. descriptor lists are aligned. */
+	priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
+	/* Check for the very unlikely case of no memory. */
+	if (priv_mem == NULL)
+		return NULL;
+
+	/* We do a request_region() to register /proc/ioports info. */
+	request_region(ioaddr, pci_tbl[chip_idx].io_size, dev->name);
+
+	dev->base_addr = ioaddr;
+	dev->irq = irq;
+
+	dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+	memset(np, 0, sizeof(*np));
+	np->priv_addr = priv_mem;
+
+	np->next_module = root_rtl8129_dev;
+	root_rtl8129_dev = dev;
+
+	np->pci_dev = pdev;
+	np->chip_id = chip_idx;
+	np->drv_flags = pci_tbl[chip_idx].drv_flags;
+	np->msg_level = (1 << debug) - 1;
+	np->max_interrupt_work = max_interrupt_work;
+	np->multicast_filter_limit = multicast_filter_limit;
+
+	np->config1 = config1;
+
+	/* Find the connected MII xcvrs.
+	   Doing this in open() would allow detecting external xcvrs later, but
+	   takes too much time. */
+	if (np->drv_flags & HAS_MII_XCVR) {
+		int phy, phy_idx = 0;
+		for (phy = 0; phy < 32 && phy_idx < sizeof(np->phys); phy++) {
+			int mii_status = mdio_read(dev, phy, 1);
+			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
+				np->phys[phy_idx++] = phy;
+				np->advertising = mdio_read(dev, phy, 4);
+				printk(KERN_INFO "%s: MII transceiver %d status 0x%4.4x "
+					   "advertising %4.4x.\n",
+					   dev->name, phy, mii_status, np->advertising);
+			}
+		}
+		if (phy_idx == 0) {
+			printk(KERN_INFO "%s: No MII transceivers found!  Assuming SYM "
+				   "transceiver.\n",
+				   dev->name);
+			np->phys[0] = 32;
+		}
+	} else
+		np->phys[0] = 32;
+
+	/* Put the chip into low-power mode. */
+	outb(0xC0, ioaddr + Cfg9346);
+	if (np->drv_flags & HAS_MII_XCVR)			/* rtl8129 chip */
+		outb(0x03, ioaddr + Config1);
+
+	outb('H', ioaddr + HltClk);		/* 'R' would leave the clock running. */
+
+	/* The lower four bits are the media type. */
+	if (option > 0) {
+		np->full_duplex = (option & 0x220) ? 1 : 0;
+		np->default_port = option & 0x330;
+		if (np->default_port)
+			np->medialock = 1;
+	}
+
+	if (found_cnt < MAX_UNITS  &&  full_duplex[found_cnt] > 0)
+		np->full_duplex = full_duplex[found_cnt];
+
+	if (np->full_duplex) {
+		printk(KERN_INFO "%s: Media type forced to Full Duplex.\n", dev->name);
+		/* Changing the MII-advertised media might prevent re-connection. */
+		np->duplex_lock = 1;
+	}
+	if (np->default_port) {
+		printk(KERN_INFO "  Forcing %dMbs %s-duplex operation.\n",
+			   (option & 0x300 ? 100 : 10),
+			   (option & 0x220 ? "full" : "half"));
+		mdio_write(dev, np->phys[0], 0,
+				   ((option & 0x300) ? 0x2000 : 0) | 	/* 100mbps? */
+				   ((option & 0x220) ? 0x0100 : 0)); /* Full duplex? */
+	}
+
+	/* The rtl81x9-specific entries in the device structure. */
+	dev->open = &rtl8129_open;
+	dev->hard_start_xmit = &rtl8129_start_xmit;
+	dev->stop = &rtl8129_close;
+	dev->get_stats = &rtl8129_get_stats;
+	dev->set_multicast_list = &set_rx_mode;
+	dev->do_ioctl = &mii_ioctl;
+
+	return dev;
+}
+
+/* Serial EEPROM section. */
+
+/*  EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
+#define EE_CS			0x08	/* EEPROM chip select. */
+#define EE_DATA_WRITE	0x02	/* EEPROM chip data in. */
+#define EE_WRITE_0		0x00
+#define EE_WRITE_1		0x02
+#define EE_DATA_READ	0x01	/* EEPROM chip data out. */
+#define EE_ENB			(0x80 | EE_CS)
+
+/* Delay between EEPROM clock transitions.
+   No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
+ */
+
+#define eeprom_delay()	inl(ee_addr)
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_WRITE_CMD	(5)
+#define EE_READ_CMD		(6)
+#define EE_ERASE_CMD	(7)
+
+static int read_eeprom(long ioaddr, int location, int addr_len)
+{
+	int i;
+	unsigned retval = 0;
+	long ee_addr = ioaddr + Cfg9346;
+	int read_cmd = location | (EE_READ_CMD << addr_len);
+
+	outb(EE_ENB & ~EE_CS, ee_addr);
+	outb(EE_ENB, ee_addr);
+
+	/* Shift the read command bits out. */
+	for (i = 4 + addr_len; i >= 0; i--) {
+		int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+		outb(EE_ENB | dataval, ee_addr);
+		eeprom_delay();
+		outb(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+		eeprom_delay();
+	}
+	outb(EE_ENB, ee_addr);
+	eeprom_delay();
+
+	for (i = 16; i > 0; i--) {
+		outb(EE_ENB | EE_SHIFT_CLK, ee_addr);
+		eeprom_delay();
+		retval = (retval << 1) | ((inb(ee_addr) & EE_DATA_READ) ? 1 : 0);
+		outb(EE_ENB, ee_addr);
+		eeprom_delay();
+	}
+
+	/* Terminate the EEPROM access. */
+	outb(~EE_CS, ee_addr);
+	return retval;
+}
+
+/* MII serial management: mostly bogus for now. */
+/* Read and write the MII management registers using software-generated
+   serial MDIO protocol.
+   The maximum data clock rate is 2.5 Mhz.  The minimum timing is usually
+   met by back-to-back PCI I/O cycles, but we insert a delay to avoid
+   "overclocking" issues. */
+#define MDIO_DIR		0x80
+#define MDIO_DATA_OUT	0x04
+#define MDIO_DATA_IN	0x02
+#define MDIO_CLK		0x01
+#define MDIO_WRITE0 (MDIO_DIR)
+#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT)
+
+#define mdio_delay(mdio_addr)	inb(mdio_addr)
+
+static char mii_2_8139_map[8] = {MII_BMCR, MII_BMSR, 0, 0, NWayAdvert,
+								 NWayLPAR, NWayExpansion, 0 };
+
+/* Synchronize the MII management interface by shifting 32 one bits out. */
+static void mdio_sync(long mdio_addr)
+{
+	int i;
+
+	for (i = 32; i >= 0; i--) {
+		outb(MDIO_WRITE1, mdio_addr);
+		mdio_delay(mdio_addr);
+		outb(MDIO_WRITE1 | MDIO_CLK, mdio_addr);
+		mdio_delay(mdio_addr);
+	}
+	return;
+}
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+	long mdio_addr = dev->base_addr + MII_SMI;
+	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+	int retval = 0;
+	int i;
+
+	if (phy_id > 31) {	/* Really a 8139.  Use internal registers. */
+		return location < 8 && mii_2_8139_map[location] ?
+			inw(dev->base_addr + mii_2_8139_map[location]) : 0;
+	}
+	mdio_sync(mdio_addr);
+	/* Shift the read command bits out. */
+	for (i = 15; i >= 0; i--) {
+		int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0;
+
+		outb(MDIO_DIR | dataval, mdio_addr);
+		mdio_delay(mdio_addr);
+		outb(MDIO_DIR | dataval | MDIO_CLK, mdio_addr);
+		mdio_delay(mdio_addr);
+	}
+
+	/* Read the two transition, 16 data, and wire-idle bits. */
+	for (i = 19; i > 0; i--) {
+		outb(0, mdio_addr);
+		mdio_delay(mdio_addr);
+		retval = (retval << 1) | ((inb(mdio_addr) & MDIO_DATA_IN) ? 1 : 0);
+		outb(MDIO_CLK, mdio_addr);
+		mdio_delay(mdio_addr);
+	}
+	return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int location,
+					   int value)
+{
+	long mdio_addr = dev->base_addr + MII_SMI;
+	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
+	int i;
+
+	if (phy_id > 31) {	/* Really a 8139.  Use internal registers. */
+		long ioaddr = dev->base_addr;
+		if (location == 0) {
+			outb(0xC0, ioaddr + Cfg9346);
+			outw(value, ioaddr + MII_BMCR);
+			outb(0x00, ioaddr + Cfg9346);
+		} else if (location < 8  &&  mii_2_8139_map[location])
+			outw(value, ioaddr + mii_2_8139_map[location]);
+		return;
+	}
+	mdio_sync(mdio_addr);
+
+	/* Shift the command bits out. */
+	for (i = 31; i >= 0; i--) {
+		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+		outb(dataval, mdio_addr);
+		mdio_delay(mdio_addr);
+		outb(dataval | MDIO_CLK, mdio_addr);
+		mdio_delay(mdio_addr);
+	}
+	/* Clear out extra bits. */
+	for (i = 2; i > 0; i--) {
+		outb(0, mdio_addr);
+		mdio_delay(mdio_addr);
+		outb(MDIO_CLK, mdio_addr);
+		mdio_delay(mdio_addr);
+	}
+	return;
+}
+
+
+static int rtl8129_open(struct net_device *dev)
+{
+	struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int rx_buf_len_idx;
+
+	MOD_INC_USE_COUNT;
+	if (request_irq(dev->irq, &rtl8129_interrupt, SA_SHIRQ, dev->name, dev)) {
+		MOD_DEC_USE_COUNT;
+		return -EAGAIN;
+	}
+
+	/* The Rx ring allocation size is 2^N + delta, which is worst-case for
+	   the kernel binary-buddy allocation.  We allocate the Tx bounce buffers
+	   at the same time to use some of the otherwise wasted space.
+	   The delta of +16 is required for dribble-over because the receiver does
+	   not wrap when the packet terminates just beyond the end of the ring. */
+	rx_buf_len_idx = RX_BUF_LEN_IDX;
+	do {
+		tp->rx_buf_len = 8192 << rx_buf_len_idx;
+		tp->rx_ring = kmalloc(tp->rx_buf_len + 16 +
+							  (TX_BUF_SIZE * NUM_TX_DESC), GFP_KERNEL);
+	} while (tp->rx_ring == NULL  &&  --rx_buf_len_idx >= 0);
+
+	if (tp->rx_ring == NULL) {
+		if (debug > 0)
+			printk(KERN_ERR "%s: Couldn't allocate a %d byte receive ring.\n",
+				   dev->name, tp->rx_buf_len);
+		MOD_DEC_USE_COUNT;
+		return -ENOMEM;
+	}
+	tp->tx_bufs = tp->rx_ring + tp->rx_buf_len + 16;
+
+	rtl8129_init_ring(dev);
+	tp->full_duplex = tp->duplex_lock;
+	tp->tx_flag = (TX_FIFO_THRESH<<11) & 0x003f0000;
+	tp->rx_config =
+		(RX_FIFO_THRESH << 13) | (rx_buf_len_idx << 11) | (RX_DMA_BURST<<8);
+
+	rtl_hw_start(dev);
+	netif_start_tx_queue(dev);
+
+	if (tp->msg_level & NETIF_MSG_IFUP)
+		printk(KERN_DEBUG"%s: rtl8129_open() ioaddr %#lx IRQ %d"
+			   " GP Pins %2.2x %s-duplex.\n",
+			   dev->name, ioaddr, dev->irq, inb(ioaddr + GPPinData),
+			   tp->full_duplex ? "full" : "half");
+
+	/* Set the timer to switch to check for link beat and perhaps switch
+	   to an alternate media type. */
+	init_timer(&tp->timer);
+	tp->timer.expires = jiffies + 3*HZ;
+	tp->timer.data = (unsigned long)dev;
+	tp->timer.function = &rtl8129_timer;
+	add_timer(&tp->timer);
+
+	return 0;
+}
+
+/* Start the hardware at open or resume. */
+static void rtl_hw_start(struct net_device *dev)
+{
+	struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int i;
+
+	/* Soft reset the chip. */
+	outb(CmdReset, ioaddr + ChipCmd);
+	/* Check that the chip has finished the reset. */
+	for (i = 1000; i > 0; i--)
+		if ((inb(ioaddr + ChipCmd) & CmdReset) == 0)
+			break;
+	/* Restore our idea of the MAC address. */
+	outb(0xC0, ioaddr + Cfg9346);
+	outl(cpu_to_le32(*(u32*)(dev->dev_addr + 0)), ioaddr + MAC0 + 0);
+	outl(cpu_to_le32(*(u32*)(dev->dev_addr + 4)), ioaddr + MAC0 + 4);
+
+	/* Hmmm, do these belong here? */
+	tp->cur_rx = 0;
+
+	/* Must enable Tx/Rx before setting transfer thresholds! */
+	outb(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd);
+	outl(tp->rx_config, ioaddr + RxConfig);
+	/* Check this value: the documentation contradicts itself.  Is the
+	   IFG correct with bit 28:27 zero, or with |0x03000000 ? */
+	outl((TX_DMA_BURST<<8), ioaddr + TxConfig);
+
+	/* This is check_duplex() */
+	if (tp->phys[0] >= 0  ||  (tp->drv_flags & HAS_MII_XCVR)) {
+		u16 mii_reg5 = mdio_read(dev, tp->phys[0], 5);
+		if (mii_reg5 == 0xffff)
+			;					/* Not there */
+		else if ((mii_reg5 & 0x0100) == 0x0100
+				 || (mii_reg5 & 0x00C0) == 0x0040)
+			tp->full_duplex = 1;
+		if (tp->msg_level & NETIF_MSG_LINK)
+			printk(KERN_INFO"%s: Setting %s%s-duplex based on"
+				   " auto-negotiated partner ability %4.4x.\n", dev->name,
+				   mii_reg5 == 0 ? "" :
+				   (mii_reg5 & 0x0180) ? "100mbps " : "10mbps ",
+				   tp->full_duplex ? "full" : "half", mii_reg5);
+	}
+
+	if (tp->drv_flags & HAS_MII_XCVR)			/* rtl8129 chip */
+		outb(tp->full_duplex ? 0x60 : 0x20, ioaddr + Config1);
+	outb(0x00, ioaddr + Cfg9346);
+
+	outl(virt_to_bus(tp->rx_ring), ioaddr + RxBuf);
+	/* Start the chip's Tx and Rx process. */
+	outl(0, ioaddr + RxMissed);
+	set_rx_mode(dev);
+	outb(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd);
+	/* Enable all known interrupts by setting the interrupt mask. */
+	outw(PCIErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver
+		 | TxErr | TxOK | RxErr | RxOK, ioaddr + IntrMask);
+
+}
+
+static void rtl8129_timer(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct rtl8129_private *np = (struct rtl8129_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int next_tick = 60*HZ;
+	int mii_reg5 = mdio_read(dev, np->phys[0], 5);
+
+	if (! np->duplex_lock  &&  mii_reg5 != 0xffff) {
+		int duplex = (mii_reg5&0x0100) || (mii_reg5 & 0x01C0) == 0x0040;
+		if (np->full_duplex != duplex) {
+			np->full_duplex = duplex;
+			printk(KERN_INFO "%s: Using %s-duplex based on MII #%d link"
+				   " partner ability of %4.4x.\n", dev->name,
+				   np->full_duplex ? "full" : "half", np->phys[0], mii_reg5);
+			if (np->drv_flags & HAS_MII_XCVR) {
+				outb(0xC0, ioaddr + Cfg9346);
+				outb(np->full_duplex ? 0x60 : 0x20, ioaddr + Config1);
+				outb(0x00, ioaddr + Cfg9346);
+			}
+		}
+	}
+#if LINUX_VERSION_CODE < 0x20300
+	/* Check for bogusness. */
+	if (inw(ioaddr + IntrStatus) & (TxOK | RxOK)) {
+		int status = inw(ioaddr + IntrStatus);			/* Double check */
+		if (status & (TxOK | RxOK)  &&  ! dev->interrupt) {
+			printk(KERN_ERR "%s: RTL8139 Interrupt line blocked, status %x.\n",
+				   dev->name, status);
+			rtl8129_interrupt(dev->irq, dev, 0);
+		}
+	}
+	if (dev->tbusy  &&  jiffies - dev->trans_start >= 2*TX_TIMEOUT)
+		rtl8129_tx_timeout(dev);
+#else
+	if (netif_queue_paused(dev)  &&
+		np->cur_tx - np->dirty_tx > 1  &&
+		(jiffies - dev->trans_start) > TX_TIMEOUT) {
+		rtl8129_tx_timeout(dev);
+	}
+#endif
+
+#if defined(RTL_TUNE_TWISTER)
+	/* This is a complicated state machine to configure the "twister" for
+	   impedance/echos based on the cable length.
+	   All of this is magic and undocumented.
+	   */
+	if (np->twistie) switch(np->twistie) {
+	case 1: {
+		if (inw(ioaddr + CSCR) & CSCR_LinkOKBit) {
+			/* We have link beat, let us tune the twister. */
+			outw(CSCR_LinkDownOffCmd, ioaddr + CSCR);
+			np->twistie = 2;	/* Change to state 2. */
+			next_tick = HZ/10;
+		} else {
+			/* Just put in some reasonable defaults for when beat returns. */
+			outw(CSCR_LinkDownCmd, ioaddr + CSCR);
+			outl(0x20,ioaddr + FIFOTMS);	/* Turn on cable test mode. */
+			outl(PARA78_default ,ioaddr + PARA78);
+			outl(PARA7c_default ,ioaddr + PARA7c);
+			np->twistie = 0;	/* Bail from future actions. */
+		}
+	} break;
+	case 2: {
+		/* Read how long it took to hear the echo. */
+		int linkcase = inw(ioaddr + CSCR) & CSCR_LinkStatusBits;
+		if (linkcase == 0x7000) np->twist_row = 3;
+		else if (linkcase == 0x3000) np->twist_row = 2;
+		else if (linkcase == 0x1000) np->twist_row = 1;
+		else np->twist_row = 0;
+		np->twist_col = 0;
+		np->twistie = 3;	/* Change to state 3. */
+		next_tick = HZ/10;
+	} break;
+	case 3: {
+		/* Put out four tuning parameters, one per 100msec. */
+		if (np->twist_col == 0) outw(0, ioaddr + FIFOTMS);
+		outl(param[(int)np->twist_row][(int)np->twist_col], ioaddr + PARA7c);
+		next_tick = HZ/10;
+		if (++np->twist_col >= 4) {
+			/* For short cables we are done.
+			   For long cables (row == 3) check for mistune. */
+			np->twistie = (np->twist_row == 3) ? 4 : 0;
+		}
+	} break;
+	case 4: {
+		/* Special case for long cables: check for mistune. */
+		if ((inw(ioaddr + CSCR) & CSCR_LinkStatusBits) == 0x7000) {
+			np->twistie = 0;
+			break;
+		} else {
+			outl(0xfb38de03, ioaddr + PARA7c);
+			np->twistie = 5;
+			next_tick = HZ/10;
+		}
+	} break;
+	case 5: {
+		/* Retune for shorter cable (column 2). */
+		outl(0x20,ioaddr + FIFOTMS);
+		outl(PARA78_default,  ioaddr + PARA78);
+		outl(PARA7c_default,  ioaddr + PARA7c);
+		outl(0x00,ioaddr + FIFOTMS);
+		np->twist_row = 2;
+		np->twist_col = 0;
+		np->twistie = 3;
+		next_tick = HZ/10;
+	} break;
+	}
+#endif
+
+	if (np->msg_level & NETIF_MSG_TIMER) {
+		if (np->drv_flags & HAS_MII_XCVR)
+			printk(KERN_DEBUG"%s: Media selection tick, GP pins %2.2x.\n",
+				   dev->name, inb(ioaddr + GPPinData));
+		else
+			printk(KERN_DEBUG"%s: Media selection tick, Link partner %4.4x.\n",
+				   dev->name, inw(ioaddr + NWayLPAR));
+		printk(KERN_DEBUG"%s:  Other registers are IntMask %4.4x "
+			   "IntStatus %4.4x RxStatus %4.4x.\n",
+			   dev->name, inw(ioaddr + IntrMask), inw(ioaddr + IntrStatus),
+			   (int)inl(ioaddr + RxEarlyStatus));
+		printk(KERN_DEBUG"%s:  Chip config %2.2x %2.2x.\n",
+			   dev->name, inb(ioaddr + Config0), inb(ioaddr + Config1));
+	}
+
+	np->timer.expires = jiffies + next_tick;
+	add_timer(&np->timer);
+}
+
+/* Tx watchdog handler: invoked when a transmit fails to complete in time.
+   Dumps chip/MII diagnostic state, drops every queued Tx packet (counting
+   them as tx_dropped), restarts the hardware and re-enables the queue. */
+static void rtl8129_tx_timeout(struct net_device *dev)
+{
+	struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int status = inw(ioaddr + IntrStatus);
+	int mii_reg, i;
+
+	/* Could be wrapped with if (tp->msg_level & NETIF_MSG_TX_ERR) */
+	printk(KERN_ERR "%s: Transmit timeout, status %2.2x %4.4x "
+		   "media %2.2x.\n",
+		   dev->name, inb(ioaddr + ChipCmd), status, inb(ioaddr + GPPinData));
+
+	/* Pending TxOK/RxOK with a stuck queue suggests the IRQ never fired. */
+	if (status & (TxOK | RxOK)) {
+		printk(KERN_ERR "%s: RTL8139 Interrupt line blocked, status %x.\n",
+			   dev->name, status);
+	}
+
+	/* Disable interrupts by clearing the interrupt mask. */
+	outw(0x0000, ioaddr + IntrMask);
+	/* Emit info to figure out what went wrong. */
+	printk(KERN_DEBUG "%s: Tx queue start entry %d  dirty entry %d%s.\n",
+		   dev->name, tp->cur_tx, tp->dirty_tx, tp->tx_full ? ", full" : "");
+	for (i = 0; i < NUM_TX_DESC; i++)
+		printk(KERN_DEBUG "%s:  Tx descriptor %d is %8.8x.%s\n",
+			   dev->name, i, (int)inl(ioaddr + TxStatus0 + i*4),
+			   i == tp->dirty_tx % NUM_TX_DESC ? " (queue head)" : "");
+	printk(KERN_DEBUG "%s: MII #%d registers are:", dev->name, tp->phys[0]);
+	for (mii_reg = 0; mii_reg < 8; mii_reg++)
+		printk(" %4.4x", mdio_read(dev, tp->phys[0], mii_reg));
+	printk(".\n");
+
+	/* Stop a shared interrupt from scavenging while we are. */
+	tp->dirty_tx = tp->cur_tx = 0;
+	/* Dump the unsent Tx packets. */
+	for (i = 0; i < NUM_TX_DESC; i++) {
+		if (tp->tx_skbuff[i]) {
+			dev_free_skb(tp->tx_skbuff[i]);
+			tp->tx_skbuff[i] = 0;
+			tp->stats.tx_dropped++;
+		}
+	}
+	rtl_hw_start(dev);
+	netif_unpause_tx_queue(dev);
+	tp->tx_full = 0;
+	return;
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+/* Resets the Tx bookkeeping (indices, full flag) and points each of the
+   NUM_TX_DESC bounce buffers at its slice of the shared tx_bufs area. */
+static void
+rtl8129_init_ring(struct net_device *dev)
+{
+	struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+	int i;
+
+	tp->tx_full = 0;
+	tp->dirty_tx = tp->cur_tx = 0;
+
+	for (i = 0; i < NUM_TX_DESC; i++) {
+		tp->tx_skbuff[i] = 0;
+		/* Each descriptor gets a fixed TX_BUF_SIZE alignment buffer. */
+		tp->tx_buf[i] = &tp->tx_bufs[i*TX_BUF_SIZE];
+	}
+}
+
+/* Queue one packet for transmission.  Returns 0 on success, 1 if the
+   queue is paused (caller will retry).  Packets whose data is not
+   4-byte aligned are copied into a per-slot bounce buffer because the
+   chip requires dword-aligned DMA addresses. */
+static int
+rtl8129_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int entry;
+
+	if (netif_pause_tx_queue(dev) != 0) {
+		/* This watchdog code is redundant with the media monitor timer. */
+		if (jiffies - dev->trans_start > TX_TIMEOUT)
+			rtl8129_tx_timeout(dev);
+		return 1;
+	}
+
+	/* Calculate the next Tx descriptor entry. */
+	entry = tp->cur_tx % NUM_TX_DESC;
+
+	tp->tx_skbuff[entry] = skb;
+	if ((long)skb->data & 3) {			/* Must use alignment buffer. */
+		memcpy(tp->tx_buf[entry], skb->data, skb->len);
+		outl(virt_to_bus(tp->tx_buf[entry]), ioaddr + TxAddr0 + entry*4);
+	} else
+		outl(virt_to_bus(skb->data), ioaddr + TxAddr0 + entry*4);
+	/* Note: the chip doesn't have auto-pad! */
+	/* Writing TxStatus starts the transmit; length is padded to ETH_ZLEN. */
+	outl(tp->tx_flag | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN),
+		 ioaddr + TxStatus0 + entry*4);
+
+	/* There is a race condition here -- we might read dirty_tx, take an
+	   interrupt that clears the Tx queue, and only then set tx_full.
+	   So we do this in two phases. */
+	if (++tp->cur_tx - tp->dirty_tx >= NUM_TX_DESC) {
+		set_bit(0, &tp->tx_full);
+		/* Re-check after setting the flag; the IRQ handler may have
+		   advanced dirty_tx in between, in which case undo the stop. */
+		if (tp->cur_tx - (volatile unsigned int)tp->dirty_tx < NUM_TX_DESC) {
+			clear_bit(0, &tp->tx_full);
+			netif_unpause_tx_queue(dev);
+		} else
+			netif_stop_tx_queue(dev);
+	} else
+		netif_unpause_tx_queue(dev);
+
+	dev->trans_start = jiffies;
+	if (tp->msg_level & NETIF_MSG_TX_QUEUED)
+		printk(KERN_DEBUG"%s: Queued Tx packet at %p size %d to slot %d.\n",
+			   dev->name, skb->data, (int)skb->len, entry);
+
+	return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+   after the Tx thread. */
+/* Loops acknowledging and servicing interrupt causes until either no
+   interesting status bits remain or max_interrupt_work events have been
+   handled (guard against a stuck interrupt line). */
+static void rtl8129_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *)dev_instance;
+	struct rtl8129_private *np = (struct rtl8129_private *)dev->priv;
+	struct rtl8129_private *tp = np;
+	int boguscnt = np->max_interrupt_work;
+	long ioaddr = dev->base_addr;
+	int link_changed = 0;		/* Grrr, avoid bogus "uninitialized" warning */
+
+#if defined(__i386__)  &&  LINUX_VERSION_CODE < 0x20123
+	/* A lock to prevent simultaneous entry bug on Intel SMP machines. */
+	if (test_and_set_bit(0, (void*)&dev->interrupt)) {
+		printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
+			   dev->name);
+		dev->interrupt = 0;	/* Avoid halting machine. */
+		return;
+	}
+#endif
+
+	do {
+		int status = inw(ioaddr + IntrStatus);
+		/* Acknowledge all of the current interrupt sources ASAP, but
+		   an first get an additional status bit from CSCR. */
+		/* CSCR must be sampled before the ack below clears the event. */
+		if (status & RxUnderrun)
+			link_changed = inw(ioaddr+CSCR) & CSCR_LinkChangeBit;
+		outw(status, ioaddr + IntrStatus);
+
+		if (tp->msg_level & NETIF_MSG_INTR)
+			printk(KERN_DEBUG"%s: interrupt  status=%#4.4x new intstat=%#4.4x.\n",
+				   dev->name, status, inw(ioaddr + IntrStatus));
+
+		if ((status & (PCIErr|PCSTimeout|RxUnderrun|RxOverflow|RxFIFOOver
+					   |TxErr|TxOK|RxErr|RxOK)) == 0)
+			break;
+
+		if (status & (RxOK|RxUnderrun|RxOverflow|RxFIFOOver))/* Rx interrupt */
+			rtl8129_rx(dev);
+
+		if (status & (TxOK | TxErr)) {
+			unsigned int dirty_tx = tp->dirty_tx;
+
+			/* Reap completed Tx descriptors in order. */
+			while (tp->cur_tx - dirty_tx > 0) {
+				int entry = dirty_tx % NUM_TX_DESC;
+				int txstatus = inl(ioaddr + TxStatus0 + entry*4);
+
+				if ( ! (txstatus & (TxStatOK | TxUnderrun | TxAborted)))
+					break;			/* It still hasn't been Txed */
+
+				/* Note: TxCarrierLost is always asserted at 100mbps. */
+				if (txstatus & (TxOutOfWindow | TxAborted)) {
+					/* There was an major error, log it. */
+					if (tp->msg_level & NETIF_MSG_TX_ERR)
+						printk(KERN_NOTICE"%s: Transmit error, Tx status %8.8x.\n",
+							   dev->name, txstatus);
+					tp->stats.tx_errors++;
+					if (txstatus&TxAborted) {
+						tp->stats.tx_aborted_errors++;
+						/* Rewriting TxConfig restarts the transmitter
+						   after an abort. */
+						outl(TX_DMA_BURST << 8, ioaddr + TxConfig);
+					}
+					if (txstatus&TxCarrierLost) tp->stats.tx_carrier_errors++;
+					if (txstatus&TxOutOfWindow) tp->stats.tx_window_errors++;
+#ifdef ETHER_STATS
+					if ((txstatus & 0x0f000000) == 0x0f000000)
+						tp->stats.collisions16++;
+#endif
+				} else {
+					if (tp->msg_level & NETIF_MSG_TX_DONE)
+						printk(KERN_DEBUG "%s: Transmit done, Tx status"
+							   " %8.8x.\n", dev->name, txstatus);
+					if (txstatus & TxUnderrun) {
+						/* Add 64 to the Tx FIFO threshold. */
+						if (tp->tx_flag <  0x00300000)
+							tp->tx_flag += 0x00020000;
+						tp->stats.tx_fifo_errors++;
+					}
+					tp->stats.collisions += (txstatus >> 24) & 15;
+#if LINUX_VERSION_CODE > 0x20119
+					tp->stats.tx_bytes += txstatus & 0x7ff;
+#endif
+					tp->stats.tx_packets++;
+				}
+
+				/* Free the original skb. */
+				dev_free_skb_irq(tp->tx_skbuff[entry]);
+				tp->tx_skbuff[entry] = 0;
+				if (test_bit(0, &tp->tx_full)) {
+					/* The ring is no longer full, clear tbusy. */
+					clear_bit(0, &tp->tx_full);
+					netif_resume_tx_queue(dev);
+				}
+				dirty_tx++;
+			}
+
+#ifndef final_version
+			if (tp->cur_tx - dirty_tx > NUM_TX_DESC) {
+				printk(KERN_ERR"%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+					   dev->name, dirty_tx, tp->cur_tx, (int)tp->tx_full);
+				dirty_tx += NUM_TX_DESC;
+			}
+#endif
+			tp->dirty_tx = dirty_tx;
+		}
+
+		/* Check uncommon events with one test. */
+		if (status & (PCIErr|PCSTimeout |RxUnderrun|RxOverflow|RxFIFOOver
+					  |TxErr|RxErr)) {
+			if (status == 0xffff) 			/* Missing chip! */
+				break;
+			rtl_error(dev, status, link_changed);
+		}
+
+		if (--boguscnt < 0) {
+			printk(KERN_WARNING"%s: Too much work at interrupt, "
+				   "IntrStatus=0x%4.4x.\n",
+				   dev->name, status);
+			/* Clear all interrupt sources. */
+			outw(0xffff, ioaddr + IntrStatus);
+			break;
+		}
+	} while (1);
+
+	if (tp->msg_level & NETIF_MSG_INTR)
+		printk(KERN_DEBUG"%s: exiting interrupt, intr_status=%#4.4x.\n",
+			   dev->name, inw(ioaddr + IntrStatus));
+
+#if defined(__i386__)  &&  LINUX_VERSION_CODE < 0x20123
+	clear_bit(0, (void*)&dev->interrupt);
+#endif
+	return;
+}
+
+/* The data sheet doesn't describe the Rx ring at all, so I'm guessing at the
+   field alignments and semantics. */
+/* Drain the single circular Rx buffer: each frame is preceded by a 4-byte
+   little-endian header (status in the low 16 bits, length-with-CRC in the
+   high 16).  Good frames are copied into a fresh skb and handed to the
+   stack; error frames trigger the RealTek-recommended receiver reset. */
+static int rtl8129_rx(struct net_device *dev)
+{
+	struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	unsigned char *rx_ring = tp->rx_ring;
+	u16 cur_rx = tp->cur_rx;
+
+	if (tp->msg_level & NETIF_MSG_RX_STATUS)
+		printk(KERN_DEBUG"%s: In rtl8129_rx(), current %4.4x BufAddr %4.4x,"
+			   " free to %4.4x, Cmd %2.2x.\n",
+			   dev->name, cur_rx, inw(ioaddr + RxBufAddr),
+			   inw(ioaddr + RxBufPtr), inb(ioaddr + ChipCmd));
+
+	while ((inb(ioaddr + ChipCmd) & RxBufEmpty) == 0) {
+		int ring_offset = cur_rx % tp->rx_buf_len;
+		u32 rx_status = le32_to_cpu(*(u32*)(rx_ring + ring_offset));
+		int rx_size = rx_status >> 16; 				/* Includes the CRC. */
+
+		if (tp->msg_level & NETIF_MSG_RX_STATUS) {
+			int i;
+			printk(KERN_DEBUG"%s:  rtl8129_rx() status %4.4x, size %4.4x,"
+				   " cur %4.4x.\n",
+				   dev->name, rx_status, rx_size, cur_rx);
+			printk(KERN_DEBUG"%s: Frame contents ", dev->name);
+			for (i = 0; i < 70; i++)
+				printk(" %2.2x", rx_ring[ring_offset + i]);
+			printk(".\n");
+		}
+		if (rx_status & (RxBadSymbol|RxRunt|RxTooLong|RxCRCErr|RxBadAlign)) {
+			if (tp->msg_level & NETIF_MSG_RX_ERR)
+				printk(KERN_DEBUG"%s: Ethernet frame had errors,"
+					   " status %8.8x.\n", dev->name, rx_status);
+			if (rx_status == 0xffffffff) {
+				printk(KERN_NOTICE"%s: Invalid receive status at ring "
+					   "offset %4.4x\n", dev->name, ring_offset);
+				rx_status = 0;
+			}
+			if (rx_status & RxTooLong) {
+				if (tp->msg_level & NETIF_MSG_DRV)
+					printk(KERN_NOTICE"%s: Oversized Ethernet frame, status"
+						   " %4.4x!\n",
+						   dev->name, rx_status);
+				/* A.C.: The chip hangs here.
+				   This should never occur, which means that we are screwed
+				   when it does.
+				 */
+			}
+			tp->stats.rx_errors++;
+			if (rx_status & (RxBadSymbol|RxBadAlign))
+				tp->stats.rx_frame_errors++;
+			if (rx_status & (RxRunt|RxTooLong)) tp->stats.rx_length_errors++;
+			if (rx_status & RxCRCErr) tp->stats.rx_crc_errors++;
+			/* Reset the receiver, based on RealTek recommendation. (Bug?) */
+			tp->cur_rx = 0;
+			outb(CmdTxEnb, ioaddr + ChipCmd);
+			/* A.C.: Reset the multicast list. */
+			set_rx_mode(dev);
+			outb(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd);
+		} else {
+			/* Malloc up new buffer, compatible with net-2e. */
+			/* Omit the four octet CRC from the length. */
+			struct sk_buff *skb;
+			int pkt_size = rx_size - 4;
+
+			/* Allocate a common-sized skbuff if we are close. */
+			skb = dev_alloc_skb(1400 < pkt_size && pkt_size < PKT_BUF_SZ-2 ?
+								PKT_BUF_SZ : pkt_size + 2);
+			if (skb == NULL) {
+				printk(KERN_WARNING"%s: Memory squeeze, deferring packet.\n",
+					   dev->name);
+				/* We should check that some rx space is free.
+				   If not, free one and mark stats->rx_dropped++. */
+				tp->stats.rx_dropped++;
+				break;
+			}
+			skb->dev = dev;
+			skb_reserve(skb, 2);	/* 16 byte align the IP fields. */
+			if (ring_offset + rx_size > tp->rx_buf_len) {
+				/* Frame wraps past the end of the ring buffer: copy the
+				   tail piece, then the remainder from the ring start. */
+				int semi_count = tp->rx_buf_len - ring_offset - 4;
+				/* This could presumably use two calls to copy_and_sum()? */
+				memcpy(skb_put(skb, semi_count), &rx_ring[ring_offset + 4],
+					   semi_count);
+				memcpy(skb_put(skb, pkt_size-semi_count), rx_ring,
+					   pkt_size-semi_count);
+				if (tp->msg_level & NETIF_MSG_PKTDATA) {
+					int i;
+					printk(KERN_DEBUG"%s:  Frame wrap @%d",
+						   dev->name, semi_count);
+					for (i = 0; i < 16; i++)
+						printk(" %2.2x", rx_ring[i]);
+					printk(".\n");
+					memset(rx_ring, 0xcc, 16);
+				}
+			} else {
+				eth_copy_and_sum(skb, &rx_ring[ring_offset + 4],
+								 pkt_size, 0);
+				skb_put(skb, pkt_size);
+			}
+			skb->protocol = eth_type_trans(skb, dev);
+			netif_rx(skb);
+#if LINUX_VERSION_CODE > 0x20119
+			tp->stats.rx_bytes += pkt_size;
+#endif
+			tp->stats.rx_packets++;
+		}
+
+		/* Advance past header+frame, rounded up to a dword boundary;
+		   writing RxBufPtr (offset by -16 per chip convention) frees
+		   the consumed space back to the hardware. */
+		cur_rx = (cur_rx + rx_size + 4 + 3) & ~3;
+		outw(cur_rx - 16, ioaddr + RxBufPtr);
+	}
+	if (tp->msg_level & NETIF_MSG_RX_STATUS)
+		printk(KERN_DEBUG"%s: Done rtl8129_rx(), current %4.4x BufAddr %4.4x,"
+			   " free to %4.4x, Cmd %2.2x.\n",
+			   dev->name, cur_rx, inw(ioaddr + RxBufAddr),
+			   inw(ioaddr + RxBufPtr), inb(ioaddr + ChipCmd));
+	tp->cur_rx = cur_rx;
+	return 0;
+}
+
+/* Error and abnormal or uncommon events handlers. */
+/* Called from the interrupt loop for uncommon status bits: updates error
+   counters, distinguishes a link-change event from a true Rx underrun,
+   resynchronizes the Rx pointer after overflow, and reports PCI errors. */
+static void rtl_error(struct net_device *dev, int status, int link_changed)
+{
+	struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+
+	if (tp->msg_level & NETIF_MSG_LINK)
+		printk(KERN_NOTICE"%s: Abnormal interrupt, status %8.8x.\n",
+			   dev->name, status);
+
+	/* Update the error count. */
+	tp->stats.rx_missed_errors += inl(ioaddr + RxMissed);
+	outl(0, ioaddr + RxMissed);
+
+	if (status & RxUnderrun){
+		/* This might actually be a link change event. */
+		if ((tp->drv_flags & HAS_LNK_CHNG)  &&  link_changed) {
+			/* Really link-change on new chips. */
+			int lpar = inw(ioaddr + NWayLPAR);
+			int duplex = (lpar&0x0100) || (lpar & 0x01C0) == 0x0040
+				|| tp->duplex_lock;
+			/* Do not use MII_BMSR as that clears sticky bit. */
+			if (inw(ioaddr + GPPinData) & 0x0004) {
+				netif_link_down(dev);
+			} else
+				netif_link_up(dev);
+			if (tp->msg_level & NETIF_MSG_LINK)
+				printk(KERN_DEBUG "%s: Link changed, link partner "
+					   "%4.4x new duplex %d.\n",
+					   dev->name, lpar, duplex);
+			tp->full_duplex = duplex;
+			/* Only count as errors with no link change. */
+			status &= ~RxUnderrun;
+		} else {
+			/* If this does not work, we will do rtl_hw_start(dev); */
+			/* Bounce the receiver and restore the Rx filter. */
+			outb(CmdTxEnb, ioaddr + ChipCmd);
+			set_rx_mode(dev);	/* Reset the multicast list. */
+			outb(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd);
+
+			tp->stats.rx_errors++;
+			tp->stats.rx_fifo_errors++;
+		}
+	}
+	
+	if (status & (RxOverflow | RxErr | RxFIFOOver)) tp->stats.rx_errors++;
+	if (status & (PCSTimeout)) tp->stats.rx_length_errors++;
+	if (status & RxFIFOOver) tp->stats.rx_fifo_errors++;
+	if (status & RxOverflow) {
+		tp->stats.rx_over_errors++;
+		/* Resync our read pointer with where the chip actually is. */
+		tp->cur_rx = inw(ioaddr + RxBufAddr) % tp->rx_buf_len;
+		outw(tp->cur_rx - 16, ioaddr + RxBufPtr);
+	}
+	if (status & PCIErr) {
+		u32 pci_cmd_status;
+		pci_read_config_dword(tp->pci_dev, PCI_COMMAND, &pci_cmd_status);
+
+		printk(KERN_ERR "%s: PCI Bus error %4.4x.\n",
+			   dev->name, pci_cmd_status);
+	}
+}
+
+/* ifdown handler: quiesce the chip, release the IRQ and Rx ring memory,
+   free any in-flight Tx skbs, and put the chip into low-power mode. */
+static int
+rtl8129_close(struct net_device *dev)
+{
+	long ioaddr = dev->base_addr;
+	struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+	int i;
+
+	netif_stop_tx_queue(dev);
+
+	if (tp->msg_level & NETIF_MSG_IFDOWN)
+		printk(KERN_DEBUG"%s: Shutting down ethercard, status was 0x%4.4x.\n",
+			   dev->name, inw(ioaddr + IntrStatus));
+
+	/* Disable interrupts by clearing the interrupt mask. */
+	outw(0x0000, ioaddr + IntrMask);
+
+	/* Stop the chip's Tx and Rx DMA processes. */
+	outb(0x00, ioaddr + ChipCmd);
+
+	/* Update the error counts. */
+	tp->stats.rx_missed_errors += inl(ioaddr + RxMissed);
+	outl(0, ioaddr + RxMissed);
+
+	del_timer(&tp->timer);
+
+	free_irq(dev->irq, dev);
+
+	/* Release any skbs still queued for transmission. */
+	for (i = 0; i < NUM_TX_DESC; i++) {
+		if (tp->tx_skbuff[i])
+			dev_free_skb(tp->tx_skbuff[i]);
+		tp->tx_skbuff[i] = 0;
+	}
+	kfree(tp->rx_ring);
+	tp->rx_ring = 0;
+
+	/* Green! Put the chip in low-power mode. */
+	outb(0xC0, ioaddr + Cfg9346);
+	outb(tp->config1 | 0x03, ioaddr + Config1);
+	outb('H', ioaddr + HltClk);		/* 'R' would leave the clock running. */
+
+	MOD_DEC_USE_COUNT;
+
+	return 0;
+}
+
+/*
+  Handle user-level ioctl() calls.
+  We must use two numeric constants as the key because some clueless person
+  changed value for the symbolic name.
+*/
+/* Supports MII PHY read/write (each accepted under both its old and new
+   ioctl number) plus driver-private get/set of tunable parameters.
+   Privileged (CAP_NET_ADMIN) checks guard all write paths. */
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct rtl8129_private *np = (struct rtl8129_private *)dev->priv;
+	u16 *data = (u16 *)&rq->ifr_data;
+	u32 *data32 = (void *)&rq->ifr_data;
+
+	switch(cmd) {
+	case 0x8947: case 0x89F0:
+		/* SIOCGMIIPHY: Get the address of the PHY in use. */
+		data[0] = np->phys[0] & 0x3f;
+		/* Fall Through */
+	case 0x8948: case 0x89F1:
+		/* SIOCGMIIREG: Read the specified MII register. */
+		data[3] = mdio_read(dev, data[0], data[1] & 0x1f);
+		return 0;
+	case 0x8949: case 0x89F2:
+		/* SIOCSMIIREG: Write the specified MII register */
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (data[0] == np->phys[0]) {
+			u16 value = data[2];
+			switch (data[1]) {
+			case 0:
+				/* Check for autonegotiation on or reset. */
+				/* A write to BMCR without autoneg/reset bits locks the
+				   media; the forced duplex is taken from bit 8. */
+				np->medialock = (value & 0x9000) ? 0 : 1;
+				if (np->medialock)
+					np->full_duplex = (value & 0x0100) ? 1 : 0;
+				break;
+			case 4: np->advertising = value; break;
+			}
+		}
+		mdio_write(dev, data[0], data[1] & 0x1f, data[2]);
+		return 0;
+	case SIOCGPARAMS:
+		/* Export the driver tunables to user space. */
+		data32[0] = np->msg_level;
+		data32[1] = np->multicast_filter_limit;
+		data32[2] = np->max_interrupt_work;
+		data32[3] = 0;			/* No rx_copybreak, always copy. */
+		return 0;
+	case SIOCSPARAMS:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		np->msg_level = data32[0];
+		np->multicast_filter_limit = data32[1];
+		np->max_interrupt_work = data32[2];
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/* Return the interface statistics.  When the interface is up, fold the
+   chip's RxMissed counter into the totals and clear it first. */
+static struct net_device_stats *
+rtl8129_get_stats(struct net_device *dev)
+{
+	struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+
+	if (netif_running(dev)) {
+		tp->stats.rx_missed_errors += inl(ioaddr + RxMissed);
+		outl(0, ioaddr + RxMissed);
+	}
+
+	return &tp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+   This routine is not state sensitive and need not be SMP locked. */
+
+/* Standard Ethernet CRC-32 polynomial, used for the multicast hash. */
+static unsigned const ethernet_polynomial = 0x04c11db7U;
+/* Bitwise big-endian CRC-32 over 'length' bytes; the high bits of the
+   result index the chip's 64-bit multicast hash filter. */
+static inline u32 ether_crc(int length, unsigned char *data)
+{
+	int crc = -1;
+
+	while (--length >= 0) {
+		unsigned char current_octet = *data++;
+		int bit;
+		for (bit = 0; bit < 8; bit++, current_octet >>= 1)
+			crc = (crc << 1) ^
+				((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
+	}
+	return crc;
+}
+
+/* Bits in RxConfig. */
+/* These select which classes of frames the receiver accepts. */
+enum rx_mode_bits {
+	AcceptErr=0x20, AcceptRunt=0x10, AcceptBroadcast=0x08,
+	AcceptMulticast=0x04, AcceptMyPhys=0x02, AcceptAllPhys=0x01,
+};
+
+/* Program the Rx filter from dev->flags and the multicast list:
+   promiscuous -> accept everything; too many groups or ALLMULTI ->
+   accept all multicast; otherwise build a 64-bit CRC hash filter. */
+static void set_rx_mode(struct net_device *dev)
+{
+	struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	u32 mc_filter[2];		 /* Multicast hash filter */
+	int i, rx_mode;
+
+	if (tp->msg_level & NETIF_MSG_RXFILTER)
+		printk(KERN_DEBUG"%s:   set_rx_mode(%4.4x) done -- Rx config %8.8x.\n",
+			   dev->name, dev->flags, (int)inl(ioaddr + RxConfig));
+
+	/* Note: do not reorder, GCC is clever about common statements. */
+	if (dev->flags & IFF_PROMISC) {
+		/* Unconditionally log net taps. */
+		printk(KERN_NOTICE"%s: Promiscuous mode enabled.\n", dev->name);
+		rx_mode = AcceptBroadcast|AcceptMulticast|AcceptMyPhys|AcceptAllPhys;
+		mc_filter[1] = mc_filter[0] = 0xffffffff;
+	} else if ((dev->mc_count > tp->multicast_filter_limit)
+			   || (dev->flags & IFF_ALLMULTI)) {
+		/* Too many to filter perfectly -- accept all multicasts. */
+		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+		mc_filter[1] = mc_filter[0] = 0xffffffff;
+	} else {
+		struct dev_mc_list *mclist;
+		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+		mc_filter[1] = mc_filter[0] = 0;
+		/* Hash each group address: top 6 CRC bits pick the filter bit. */
+		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+			 i++, mclist = mclist->next)
+			set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26, mc_filter);
+	}
+	/* We can safely update without stopping the chip. */
+	outl(tp->rx_config | rx_mode, ioaddr + RxConfig);
+	/* Keep a software copy so the filter can be restored after resets. */
+	tp->mc_filter[0] = mc_filter[0];
+	tp->mc_filter[1] = mc_filter[1];
+	outl(mc_filter[0], ioaddr + MAR0 + 0);
+	outl(mc_filter[1], ioaddr + MAR0 + 4);
+	return;
+}
+
+
+/* Power-management / hot-plug event dispatcher (attach, suspend, resume,
+   detach).  DRV_DETACH fully tears the device down: close if up,
+   unregister, release I/O resources, unlink from the driver's module
+   list, and free the private area and device structure. */
+static int rtl_pwr_event(void *dev_instance, int event)
+{
+	struct net_device *dev = dev_instance;
+	struct rtl8129_private *np = (struct rtl8129_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+
+	if (np->msg_level & NETIF_MSG_LINK)
+		printk("%s: Handling power event %d.\n", dev->name, event);
+	switch(event) {
+	case DRV_ATTACH:
+		MOD_INC_USE_COUNT;
+		break;
+	case DRV_SUSPEND:
+		netif_device_detach(dev);
+		/* Disable interrupts, stop Tx and Rx. */
+		outw(0x0000, ioaddr + IntrMask);
+		outb(0x00, ioaddr + ChipCmd);
+		/* Update the error counts. */
+		np->stats.rx_missed_errors += inl(ioaddr + RxMissed);
+		outl(0, ioaddr + RxMissed);
+		break;
+	case DRV_RESUME:
+		netif_device_attach(dev);
+		rtl_hw_start(dev);
+		break;
+	case DRV_DETACH: {
+		struct net_device **devp, **next;
+		if (dev->flags & IFF_UP) {
+			dev_close(dev);
+			dev->flags &= ~(IFF_UP|IFF_RUNNING);
+		}
+		unregister_netdev(dev);
+		release_region(dev->base_addr, pci_tbl[np->chip_id].io_size);
+#ifndef USE_IO_OPS
+		iounmap((char *)dev->base_addr);
+#endif
+		/* Unlink this device from the singly-linked module list. */
+		for (devp = &root_rtl8129_dev; *devp; devp = next) {
+			next = &((struct rtl8129_private *)(*devp)->priv)->next_module;
+			if (*devp == dev) {
+				*devp = *next;
+				break;
+			}
+		}
+		/* Free the private area before the device that points at it. */
+		if (np->priv_addr)
+			kfree(np->priv_addr);
+		kfree(dev);
+		MOD_DEC_USE_COUNT;
+		break;
+	}
+	}
+
+	return 0;
+}
+
+#ifdef CARDBUS
+
+#include <pcmcia/driver_ops.h>
+
+/* CardBus attach: locate the PCI resources for a newly inserted card,
+   probe it as an 8139B (pci_tbl entry 2), and return a dev_node_t for
+   the PCMCIA layer, or NULL on any failure. */
+static dev_node_t *rtl8139_attach(dev_locator_t *loc)
+{
+	struct net_device *dev;
+	u16 dev_id;
+	u32 pciaddr;
+	u8 bus, devfn, irq;
+	long hostaddr;
+	/* Note: the chip index should match the 8139B pci_tbl[] entry. */
+	int chip_idx = 2;
+
+	if (loc->bus != LOC_PCI) return NULL;
+	bus = loc->b.pci.bus; devfn = loc->b.pci.devfn;
+	printk(KERN_DEBUG "rtl8139_attach(bus %d, function %d)\n", bus, devfn);
+#ifdef USE_IO_OPS
+	pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, &pciaddr);
+	hostaddr = pciaddr & PCI_BASE_ADDRESS_IO_MASK;
+#else
+	pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_1, &pciaddr);
+	hostaddr = (long)ioremap(pciaddr & PCI_BASE_ADDRESS_MEM_MASK,
+							 pci_tbl[chip_idx].io_size);
+#endif
+	pcibios_read_config_byte(bus, devfn, PCI_INTERRUPT_LINE, &irq);
+	pcibios_read_config_word(bus, devfn, PCI_DEVICE_ID, &dev_id);
+	if (hostaddr == 0 || irq == 0) {
+		printk(KERN_ERR "The %s interface at %d/%d was not assigned an %s.\n"
+			   KERN_ERR "  It will not be activated.\n",
+			   pci_tbl[chip_idx].name, bus, devfn,
+			   hostaddr == 0 ? "address" : "IRQ");
+		return NULL;
+	}
+	dev = rtl8139_probe1(pci_find_slot(bus, devfn), NULL,
+						 hostaddr, irq, chip_idx, 0);
+	if (dev) {
+		/* NOTE(review): kmalloc result is not checked; the strcpy below
+		   would oops on allocation failure. */
+		dev_node_t *node = kmalloc(sizeof(dev_node_t), GFP_KERNEL);
+		strcpy(node->dev_name, dev->name);
+		node->major = node->minor = 0;
+		node->next = NULL;
+		MOD_INC_USE_COUNT;
+		return node;
+	}
+	return NULL;
+}
+
+/* CardBus detach: find the named device in the module list, unregister
+   it, release its I/O resources and free all associated memory. */
+static void rtl8139_detach(dev_node_t *node)
+{
+	struct net_device **devp, **next;
+	printk(KERN_INFO "rtl8139_detach(%s)\n", node->dev_name);
+	for (devp = &root_rtl8129_dev; *devp; devp = next) {
+		next = &((struct rtl8129_private *)(*devp)->priv)->next_module;
+		if (strcmp((*devp)->name, node->dev_name) == 0) break;
+	}
+	if (*devp) {
+		struct rtl8129_private *np =
+			(struct rtl8129_private *)(*devp)->priv;
+		unregister_netdev(*devp);
+		release_region((*devp)->base_addr, pci_tbl[np->chip_id].io_size);
+#ifndef USE_IO_OPS
+		iounmap((char *)(*devp)->base_addr);
+#endif
+		/* NOTE(review): *devp is freed before np->priv_addr is read; if
+		   the private area lives inside the dev allocation this is a
+		   use-after-free.  Compare the DRV_DETACH path in rtl_pwr_event,
+		   which frees priv_addr first — confirm against probe1. */
+		kfree(*devp);
+		if (np->priv_addr)
+			kfree(np->priv_addr);
+		*devp = *next;
+		kfree(node);
+		MOD_DEC_USE_COUNT;
+	}
+}
+
+/* CardBus driver operations: only attach and detach are implemented;
+   suspend and resume are left as null handlers. */
+struct driver_operations realtek_ops = {
+	"realtek_cb",
+	rtl8139_attach, /*rtl8139_suspend*/0, /*rtl8139_resume*/0, rtl8139_detach
+};
+
+#endif  /* Cardbus support */
+
+#ifdef MODULE
+/* Module entry point: print the version banner, then register either the
+   CardBus operations or the generic PCI driver, per build configuration.
+   NOTE(review): versionA/versionB are presumably declared earlier in this
+   file (outside this view) — confirm they exist; the cb_shim.c shown at
+   the top of this patch uses the names version1/version2 instead. */
+int init_module(void)
+{
+	if (debug >= NETIF_MSG_DRV)	/* Emit version even if no cards detected. */
+		printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
+#ifdef CARDBUS
+	register_driver(&realtek_ops);
+	return 0;
+#else
+	return pci_drv_register(&rtl8139_drv_id, NULL);
+#endif
+}
+
+/* Module exit point: unregister the driver, then walk the device list
+   tearing down every remaining interface and freeing its resources. */
+void cleanup_module(void)
+{
+	struct net_device *next_dev;
+
+#ifdef CARDBUS
+	unregister_driver(&realtek_ops);
+#else
+	pci_drv_unregister(&rtl8139_drv_id);
+#endif
+
+	while (root_rtl8129_dev) {
+		struct rtl8129_private *np = (void *)(root_rtl8129_dev->priv);
+		unregister_netdev(root_rtl8129_dev);
+		release_region(root_rtl8129_dev->base_addr,
+					   pci_tbl[np->chip_id].io_size);
+#ifndef USE_IO_OPS
+		iounmap((char *)(root_rtl8129_dev->base_addr));
+#endif
+		/* Capture the successor before freeing the current device. */
+		next_dev = np->next_module;
+		if (np->priv_addr)
+			kfree(np->priv_addr);
+		kfree(root_rtl8129_dev);
+		root_rtl8129_dev = next_dev;
+	}
+}
+
+#endif  /* MODULE */
+
+/*
+ * Local variables:
+ *  compile-command: "make KERNVER=`uname -r` rtl8139.o"
+ *  compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c rtl8139.c"
+ *  cardbus-compile-command: "gcc -DCARDBUS -DMODULE -Wall -Wstrict-prototypes -O6 -c rtl8139.c -o realtek_cb.o -I/usr/src/pcmcia/include/"
+ *  c-indent-level: 4
+ *  c-basic-offset: 4
+ *  tab-width: 4
+ * End:
+ */
diff -uNr net/drivers/net/starfire.c linux-2.4.20/drivers/net/starfire.c
--- net/drivers/net/starfire.c	2003-01-14 20:28:44.000000000 -0500
+++ linux-2.4.20/drivers/net/starfire.c	2003-01-14 20:29:35.000000000 -0500
@@ -1,10 +1,6 @@
 /* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
 /*
-	Written 1998-2000 by Donald Becker.
-
-	Current maintainer is Ion Badulescu <ionut@cs.columbia.edu>. Please
-	send all bug reports to me, and not to Donald Becker, as this code
-	has been modified quite a bit from Donald's original version.
+	Written 1998-2002 by Donald Becker.
 
 	This software may be used and distributed according to the terms of
 	the GNU General Public License (GPL), incorporated herein by reference.
@@ -18,181 +14,61 @@
 	410 Severn Ave., Suite 210
 	Annapolis MD 21403
 
-	Support and updates available at
+	Support information and updates available at
 	http://www.scyld.com/network/starfire.html
-
-	-----------------------------------------------------------
-
-	Linux kernel-specific changes:
-
-	LK1.1.1 (jgarzik):
-	- Use PCI driver interface
-	- Fix MOD_xxx races
-	- softnet fixups
-
-	LK1.1.2 (jgarzik):
-	- Merge Becker version 0.15
-
-	LK1.1.3 (Andrew Morton)
-	- Timer cleanups
-
-	LK1.1.4 (jgarzik):
-	- Merge Becker version 1.03
-
-	LK1.2.1 (Ion Badulescu <ionut@cs.columbia.edu>)
-	- Support hardware Rx/Tx checksumming
-	- Use the GFP firmware taken from Adaptec's Netware driver
-
-	LK1.2.2 (Ion Badulescu)
-	- Backported to 2.2.x
-
-	LK1.2.3 (Ion Badulescu)
-	- Fix the flaky mdio interface
-	- More compat clean-ups
-
-	LK1.2.4 (Ion Badulescu)
-	- More 2.2.x initialization fixes
-
-	LK1.2.5 (Ion Badulescu)
-	- Several fixes from Manfred Spraul
-
-	LK1.2.6 (Ion Badulescu)
-	- Fixed ifup/ifdown/ifup problem in 2.4.x
-
-	LK1.2.7 (Ion Badulescu)
-	- Removed unused code
-	- Made more functions static and __init
-
-	LK1.2.8 (Ion Badulescu)
-	- Quell bogus error messages, inform about the Tx threshold
-	- Removed #ifdef CONFIG_PCI, this driver is PCI only
-
-	LK1.2.9 (Ion Badulescu)
-	- Merged Jeff Garzik's changes from 2.4.4-pre5
-	- Added 2.2.x compatibility stuff required by the above changes
-
-	LK1.2.9a (Ion Badulescu)
-	- More updates from Jeff Garzik
-
-	LK1.3.0 (Ion Badulescu)
-	- Merged zerocopy support
-
-	LK1.3.1 (Ion Badulescu)
-	- Added ethtool support
-	- Added GPIO (media change) interrupt support
-
-	LK1.3.2 (Ion Badulescu)
-	- Fixed 2.2.x compatibility issues introduced in 1.3.1
-	- Fixed ethtool ioctl returning uninitialized memory
-
-	LK1.3.3 (Ion Badulescu)
-	- Initialize the TxMode register properly
-	- Don't dereference dev->priv after freeing it
-
-	LK1.3.4 (Ion Badulescu)
-	- Fixed initialization timing problems
-	- Fixed interrupt mask definitions
-
-	LK1.3.5 (jgarzik)
-	- ethtool NWAY_RST, GLINK, [GS]MSGLVL support
-
-	LK1.3.6:
-	- Sparc64 support and fixes (Ion Badulescu)
-	- Better stats and error handling (Ion Badulescu)
-	- Use new pci_set_mwi() PCI API function (jgarzik)
-
-TODO:
-	- implement tx_timeout() properly
-	- VLAN support
 */
 
-#define DRV_NAME	"starfire"
-#define DRV_VERSION	"1.03+LK1.3.6"
-#define DRV_RELDATE	"March 7, 2002"
-
-#include <linux/version.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/crc32.h>
-#include <asm/processor.h>		/* Processor type for cache alignment. */
-#include <asm/uaccess.h>
-#include <asm/io.h>
-
-/*
- * Adaptec's license for their Novell drivers (which is where I got the
- * firmware files) does not allow one to redistribute them. Thus, we can't
- * include the firmware with this driver.
- *
- * However, should a legal-to-use firmware become available,
- * the driver developer would need only to obtain the firmware in the
- * form of a C header file.
- * Once that's done, the #undef below must be changed into a #define
- * for this driver to really use the firmware. Note that Rx/Tx
- * hardware TCP checksumming is not possible without the firmware.
- *
- * WANTED: legal firmware to include with this GPL'd driver.
- */
-#undef HAS_FIRMWARE
-/*
- * The current frame processor firmware fails to checksum a fragment
- * of length 1. If and when this is fixed, the #define below can be removed.
- */
-#define HAS_BROKEN_FIRMWARE
-/*
- * Define this if using the driver with the zero-copy patch
- */
-#if defined(HAS_FIRMWARE) && defined(MAX_SKB_FRAGS)
-#define ZEROCOPY
-#endif
-
-#ifdef HAS_FIRMWARE
-#include "starfire_firmware.h"
-#endif /* HAS_FIRMWARE */
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"starfire.c:v1.08 11/17/2002  Written by Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" Updates and info at http://www.scyld.com/network/starfire.html\n";
 
 /* The user-configurable values.
    These may be modified when a driver module is loaded.*/
 
 /* Used for tuning interrupt latency vs. overhead. */
-static int interrupt_mitigation;
+static int interrupt_mitigation = 0x0;
+
+/* Message enable level: 0..31 = no..all messages.  See NETIF_MSG docs. */
+static int debug = 2;
 
-static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
 static int max_interrupt_work = 20;
-static int mtu;
+
 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
-   The Starfire has a 512 element hash table based on the Ethernet CRC. */
-static int multicast_filter_limit = 512;
+   The Starfire has a 512 element hash table based on the Ethernet CRC.  */
+static int multicast_filter_limit = 32;
 
-#define PKT_BUF_SZ	1536		/* Size of each temporary Rx buffer.*/
-/*
- * Set the copy breakpoint for the copy-only-tiny-frames scheme.
- * Setting to > 1518 effectively disables this feature.
- *
- * NOTE:
- * The ia64 doesn't allow for unaligned loads even of integers being
- * misaligned on a 2 byte boundary. Thus always force copying of
- * packets as the starfire doesn't allow for misaligned DMAs ;-(
- * 23/10/2000 - Jes
- *
- * The Alpha and the Sparc don't allow unaligned loads, either. -Ion
- */
-#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
-static int rx_copybreak = PKT_BUF_SZ;
-#else
-static int rx_copybreak /* = 0 */;
-#endif
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+   Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak = 0;
 
 /* Used to pass the media type, etc.
-   Both 'options[]' and 'full_duplex[]' exist for driver interoperability.
+   Both 'options[]' and 'full_duplex[]' exist for driver interoperability,
+   however full_duplex[] should never be used in new configurations.
    The media type is usually passed in 'options[]'.
+    The default is autonegotation for speed and duplex.
+	This should rarely be overridden.
+    Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
+    Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
+    Use option values 0x20 and 0x200 for forcing full duplex operation.
 */
 #define MAX_UNITS 8		/* More are supported, limit only on options */
-static int options[MAX_UNITS] = {0, };
-static int full_duplex[MAX_UNITS] = {0, };
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Automatically extracted configuration info:
+probe-func: starfire_probe
+config-in: tristate 'Adaptec DuraLAN ("starfire") series PCI Ethernet support' CONFIG_DURLAN
+
+c-help-name: Adaptec DuraLAN ("starfire") series PCI Ethernet support
+c-help-symbol: CONFIG_DURALAN
+c-help: This driver is for the Adaptec DuraLAN series, the 6915, 62022
+c-help: and 62044 boards.
+c-help: Design information, usage details and updates are available from
+c-help: http://www.scyld.com/network/starfire.html
+*/
 
 /* Operational parameters that are set at compile time. */
 
@@ -208,80 +84,101 @@
 
 /* Operational parameters that usually are not changed. */
 /* Time in jiffies before concluding the transmitter is hung. */
-#define TX_TIMEOUT	(2 * HZ)
-
-#ifdef ZEROCOPY
-#if MAX_SKB_FRAGS <= 6
-#define MAX_STARFIRE_FRAGS 6
-#else  /* MAX_STARFIRE_FRAGS > 6 */
-#warning This driver will not work with more than 6 skb fragments.
-#warning Turning off zerocopy support.
-#undef ZEROCOPY
-#endif /* MAX_STARFIRE_FRAGS > 6 */
-#endif /* ZEROCOPY */
-
-#ifdef ZEROCOPY
-#define skb_first_frag_len(skb)	skb_headlen(skb)
-#else  /* not ZEROCOPY */
-#define skb_first_frag_len(skb)	(skb->len)
-#endif /* not ZEROCOPY */
+#define TX_TIMEOUT  (6*HZ)
 
-/* 2.2.x compatibility code */
-#if LINUX_VERSION_CODE < 0x20300
-
-#include "starfire-kcomp22.h"
-
-#else  /* LINUX_VERSION_CODE > 0x20300 */
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+   Do not change this value without good reason.  This is not a limit,
+   but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ		1536
 
-#include <linux/ethtool.h>
-#include <linux/mii.h>
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning  You must compile this file with the correct options!
+#warning  See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
 
-#define COMPAT_MOD_INC_USE_COUNT
-#define COMPAT_MOD_DEC_USE_COUNT
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
 
-#define init_tx_timer(dev, func, timeout) \
-	dev->tx_timeout = func; \
-	dev->watchdog_timeo = timeout;
-#define kick_tx_timer(dev, func, timeout)
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
 
-#define netif_start_if(dev)
-#define netif_stop_if(dev)
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/processor.h>		/* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
 
-#define PCI_SLOT_NAME(pci_dev)	(pci_dev)->slot_name
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
 
-#endif /* LINUX_VERSION_CODE > 0x20300 */
-/* end of compatibility code */
+/* Condensed operations for readability.
+   Compatibility defines are in kern_compat.h */
 
+#define virt_to_le32desc(addr)  cpu_to_le32(virt_to_bus(addr))
+#define le32desc_to_virt(addr)  bus_to_virt(le32_to_cpu(addr))
 
-/* These identify the driver base version and may not be removed. */
-static char version[] __devinitdata =
-KERN_INFO "starfire.c:v1.03 7/26/2000  Written by Donald Becker <becker@scyld.com>\n"
-KERN_INFO " (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
+#if (LINUX_VERSION_CODE >= 0x20100)  &&  defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
 
 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
 MODULE_LICENSE("GPL");
-
-MODULE_PARM(max_interrupt_work, "i");
-MODULE_PARM(mtu, "i");
 MODULE_PARM(debug, "i");
-MODULE_PARM(rx_copybreak, "i");
-MODULE_PARM(interrupt_mitigation, "i");
 MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(rx_copybreak, "i");
 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
-MODULE_PARM_DESC(max_interrupt_work, "Starfire maximum events handled per interrupt");
-MODULE_PARM_DESC(mtu, "Starfire MTU (all boards)");
-MODULE_PARM_DESC(debug, "Starfire debug level (0-6)");
-MODULE_PARM_DESC(rx_copybreak, "Starfire copy breakpoint for copy-only-tiny-frames");
-MODULE_PARM_DESC(options, "Starfire: Bits 0-3: media type, bit 17: full duplex");
-MODULE_PARM_DESC(full_duplex, "Starfire full duplex setting(s) (1)");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM_DESC(debug, "Driver message enable level (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(max_interrupt_work,
+				 "Driver maximum events handled per interrupt");
+MODULE_PARM_DESC(full_duplex,
+				 "Non-zero to set forced full duplex (depricated).");
+MODULE_PARM_DESC(rx_copybreak,
+				 "Breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+				 "Multicast addresses before switching to Rx-all-multicast");
 
 /*
 				Theory of Operation
 
 I. Board Compatibility
 
-This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.
+This driver is for the Adaptec 6915 DuraLAN "Starfire" 64 bit PCI Ethernet
+adapter, and the multiport boards using the same chip.
 
 II. Board-specific settings
 
@@ -305,9 +202,8 @@
 See the Adaptec manual for the many possible structures, and options for
 each structure.  There are far too many to document here.
 
-For transmit this driver uses type 0/1 transmit descriptors (depending
-on the presence of the zerocopy infrastructure), and relies on automatic
-minimum-length padding.  It does not use the completion queue
+For transmit this driver uses type 1 transmit descriptors, and relies on
+automatic minimum-length padding.  It does not use the completion queue
 consumer index, but instead checks for non-zero status entries.
 
 For receive this driver uses type 0 receive descriptors.  The driver
@@ -322,11 +218,10 @@
 phase of receive.
 
 A notable aspect of operation is that unaligned buffers are not permitted by
-the Starfire hardware.  Thus the IP header at offset 14 in an ethernet frame
+the Starfire hardware.  The IP header at offset 14 in an ethernet frame thus
 isn't longword aligned, which may cause problems on some machine
-e.g. Alphas and IA64. For these architectures, the driver is forced to copy
-the frame into a new skbuff unconditionally. Copied frames are put into the
-skbuff at an offset of "+2", thus 16-byte aligning the IP header.
+e.g. Alphas.  Copied frames are put into the skbuff at an offset of "+2",
+16-byte aligning the IP header.
 
 IIId. Synchronization
 
@@ -359,33 +254,28 @@
 
 
 
+static void *starfire_probe1(struct pci_dev *pdev, void *init_dev,
+							 long ioaddr, int irq, int chip_idx, int find_cnt);
+static int starfire_pwr_event(void *dev_instance, int event);
 enum chip_capability_flags {CanHaveMII=1, };
 #define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR0)
+/* And maps in 0.5MB(!) -- no I/O mapping here!  */
+#define MEM_ADDR_SZ 0x80000
 
-#if 0
-#define ADDR_64BITS 1			/* This chip uses 64 bit addresses. */
+#if 0 && (defined(__x86_64) || defined(__alpha__))
+/* Enable 64 bit address modes. */
+#define STARFIRE_ADDR_64BITS 1
 #endif
 
-#define HAS_IP_COPYSUM 1
-
-enum chipset {
-	CH_6915 = 0,
-};
-
-static struct pci_device_id starfire_pci_tbl[] __devinitdata = {
-	{ 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 },
-	{ 0, }
-};
-MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);
-
-/* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
-static struct chip_info {
-	const char *name;
-	int drv_flags;
-} netdrv_tbl[] __devinitdata = {
-	{ "Adaptec Starfire 6915", CanHaveMII },
+static struct pci_id_info pci_id_tbl[] = {
+	{"Adaptec Starfire 6915", { 0x69159004, 0xffffffff, },
+	 PCI_IOTYPE, MEM_ADDR_SZ, CanHaveMII},
+	{0,},						/* 0 terminated list. */
 };
 
+struct drv_id_info starfire_drv_id = {
+	"starfire", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
+	starfire_probe1, starfire_pwr_event };
 
 /* Offsets to the device registers.
    Unlike software-only systems, device drivers interact with complex hardware.
@@ -399,40 +289,27 @@
 	PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
 	IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
 	MIICtrl=0x52000, StationAddr=0x50120, EEPROMCtrl=0x51000,
-	GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
+	TxDescCtrl=0x50090,
 	TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
 	TxRingHiAddr=0x5009C,		/* 64 bit address extension. */
 	TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
 	TxThreshold=0x500B0,
 	CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
 	RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
-	CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
+	CompletionQConsumerIdx=0x500C4,
 	RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
 	RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
-	TxMode=0x55000, PerfFilterTable=0x56000, HashTable=0x56100,
-	TxGfpMem=0x58000, RxGfpMem=0x5a000,
+	TxMode=0x55000,
 };
 
 /* Bits in the interrupt status/mask registers. */
 enum intr_status_bits {
-	IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
-	IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
-	IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
-	IntrTxComplQLow=0x200000, IntrPCI=0x100000,
-	IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
-	IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
-	IntrNormalSummary=0x8000, IntrTxDone=0x4000,
-	IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
-	IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
-	IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
-	IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40,
-	IntrNoTxCsum=0x20, IntrTxBadID=0x10,
-	IntrHiPriTxBadID=0x08, IntrRxGfp=0x04,
-	IntrTxGfp=0x02, IntrPCIPad=0x01,
-	/* not quite bits */
-	IntrRxDone=IntrRxQ2Done | IntrRxQ1Done,
-	IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low,
-	IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe,
+	IntrNormalSummary=0x8000,	IntrAbnormalSummary=0x02000000,
+	IntrRxDone=0x0300, IntrRxEmpty=0x10040, IntrRxPCIErr=0x80000,
+	IntrTxDone=0x4000, IntrTxEmpty=0x1000, IntrTxPCIErr=0x80000,
+	StatsMax=0x08000000, LinkChange=0xf0000000,
+	IntrTxDataLow=0x00040000,
+	IntrPCIPin=0x01,
 };
 
 /* Bits in the RxFilterMode register. */
@@ -441,40 +318,19 @@
 	AcceptMulticast=0x10, AcceptMyPhys=0xE040,
 };
 
-/* Bits in the TxDescCtrl register. */
-enum tx_ctrl_bits {
-	TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20,
-	TxDescSpace128=0x30, TxDescSpace256=0x40,
-	TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02,
-	TxDescType3=0x03, TxDescType4=0x04,
-	TxNoDMACompletion=0x08, TxDescQ64bit=0x80,
-	TxHiPriFIFOThreshShift=24, TxPadLenShift=16,
-	TxDMABurstSizeShift=8,
-};
-
-/* Bits in the RxDescQCtrl register. */
-enum rx_ctrl_bits {
-	RxBufferLenShift=16, RxMinDescrThreshShift=0,
-	RxPrefetchMode=0x8000, Rx2048QEntries=0x4000,
-	RxVariableQ=0x2000, RxDesc64bit=0x1000,
-	RxDescQAddr64bit=0x0100,
-	RxDescSpace4=0x000, RxDescSpace8=0x100,
-	RxDescSpace16=0x200, RxDescSpace32=0x300,
-	RxDescSpace64=0x400, RxDescSpace128=0x500,
-	RxConsumerWrEn=0x80,
-};
-
-/* Bits in the RxCompletionAddr register */
-enum rx_compl_bits {
-	RxComplQAddr64bit=0x80, TxComplProducerWrEn=0x40,
-	RxComplType0=0x00, RxComplType1=0x10,
-	RxComplType2=0x20, RxComplType3=0x30,
-	RxComplThreshShift=0,
+/* Misc. bits.  Symbolic names so that may be searched for. */
+enum misc_bits {
+	ChipResetCmd=1,				/* PCIDeviceConfig */
+	PCIIntEnb=0x00800000,		/* PCIDeviceConfig */
+	TxEnable=0x0A, RxEnable=0x05, SoftIntr=0x100, /* GenCtrl */
 };
 
 /* The Rx and Tx buffer descriptors. */
 struct starfire_rx_desc {
-	u32 rxaddr;			/* Optionally 64 bits. */
+	u32 rxaddr;					/* Optionally 64 bits. */
+#if defined(STARFIRE_ADDR_64BITS)
+	u32 rxaddr_hi;					/* Optionally 64 bits. */
+#endif
 };
 enum rx_desc_bits {
 	RxDescValid=1, RxDescEndRing=2,
@@ -483,341 +339,232 @@
 /* Completion queue entry.
    You must update the page allocation, init_ring and the shift count in rx()
    if using a larger format. */
-#ifdef HAS_FIRMWARE
-#define csum_rx_status
-#endif /* HAS_FIRMWARE */
 struct rx_done_desc {
-	u32 status;			/* Low 16 bits is length. */
-#ifdef csum_rx_status
-	u32 status2;			/* Low 16 bits is csum */
-#endif /* csum_rx_status */
+	u32 status;					/* Low 16 bits is length. */
 #ifdef full_rx_status
 	u32 status2;
 	u16 vlanid;
-	u16 csum;			/* partial checksum */
+	u16 csum; 			/* partial checksum */
 	u32 timestamp;
-#endif /* full_rx_status */
+#endif
 };
 enum rx_done_bits {
 	RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
 };
 
-#ifdef ZEROCOPY
-/* Type 0 Tx descriptor. */
-/* If more fragments are needed, don't forget to change the
-   descriptor spacing as well! */
-struct starfire_tx_desc {
-	u32 status;
-	u32 nbufs;
-	u32 first_addr;
-	u16 first_len;
-	u16 total_len;
-	struct {
-		u32 addr;
-		u32 len;
-	} frag[MAX_STARFIRE_FRAGS];
-};
-#else  /* not ZEROCOPY */
 /* Type 1 Tx descriptor. */
 struct starfire_tx_desc {
-	u32 status;			/* Upper bits are status, lower 16 length. */
-	u32 first_addr;
+	u32 status;					/* Upper bits are status, lower 16 length. */
+	u32 addr;
 };
-#endif /* not ZEROCOPY */
 enum tx_desc_bits {
-	TxDescID=0xB0000000,
-	TxCRCEn=0x01000000, TxDescIntr=0x08000000,
-	TxRingWrap=0x04000000, TxCalTCP=0x02000000,
+	TxDescID=0xB1010000,		/* Also marks single fragment, add CRC.  */
+	TxDescIntr=0x08000000, TxRingWrap=0x04000000,
 };
 struct tx_done_report {
-	u32 status;			/* timestamp, index. */
+	u32 status;					/* timestamp, index. */
 #if 0
-	u32 intrstatus;			/* interrupt status */
+	u32 intrstatus;				/* interrupt status */
 #endif
 };
 
-struct rx_ring_info {
-	struct sk_buff *skb;
-	dma_addr_t mapping;
-};
-struct tx_ring_info {
-	struct sk_buff *skb;
-	dma_addr_t first_mapping;
-#ifdef ZEROCOPY
-	dma_addr_t frag_mapping[MAX_STARFIRE_FRAGS];
-#endif /* ZEROCOPY */
-};
-
-#define PHY_CNT		2
+#define PRIV_ALIGN	15 	/* Required alignment mask */
 struct netdev_private {
 	/* Descriptor rings first for alignment. */
 	struct starfire_rx_desc *rx_ring;
 	struct starfire_tx_desc *tx_ring;
-	dma_addr_t rx_ring_dma;
-	dma_addr_t tx_ring_dma;
+	struct net_device *next_module;		/* Link for devices of this type. */
+	void *priv_addr;					/* Unaligned address for kfree */
+	const char *product_name;
 	/* The addresses of rx/tx-in-place skbuffs. */
-	struct rx_ring_info rx_info[RX_RING_SIZE];
-	struct tx_ring_info tx_info[TX_RING_SIZE];
-	/* Pointers to completion queues (full pages). */
-	struct rx_done_desc *rx_done_q;
-	dma_addr_t rx_done_q_dma;
+	struct sk_buff* rx_skbuff[RX_RING_SIZE];
+	struct sk_buff* tx_skbuff[TX_RING_SIZE];
+	u8 pad0[100];						/* Impact padding */
+	/* Pointers to completion queues (full pages).  Cache line pad.. */
+	struct rx_done_desc *rx_done_q  __attribute__((aligned (L1_CACHE_BYTES)));
 	unsigned int rx_done;
-	struct tx_done_report *tx_done_q;
-	dma_addr_t tx_done_q_dma;
+	struct tx_done_report *tx_done_q __attribute__((aligned (L1_CACHE_BYTES)));
 	unsigned int tx_done;
+
 	struct net_device_stats stats;
+	struct timer_list timer;	/* Media monitoring timer. */
+	int msg_level;
+	int chip_id, drv_flags;
 	struct pci_dev *pci_dev;
 	/* Frequently used values: keep some adjacent for cache effect. */
-	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
+	int max_interrupt_work;
+	int intr_enable;
+	unsigned int restore_intr_enable:1;	/* Set if temporarily masked.  */
+	unsigned int polling:1;				/* Erk, IRQ err. */
+
+	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
+	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
+	int rx_copybreak;
+
 	unsigned int cur_tx, dirty_tx;
-	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
-	unsigned int tx_full:1,		/* The Tx queue is full. */
+	unsigned int tx_full:1;				/* The Tx queue is full. */
 	/* These values keep track of the transceiver/media in use. */
-		autoneg:1,		/* Autonegotiation allowed. */
-		full_duplex:1,		/* Full-duplex operation. */
-		speed100:1;		/* Set if speed == 100MBit. */
-	unsigned int intr_mitigation;
+	unsigned int full_duplex:1,			/* Full-duplex operation requested. */
+		medialock:1,					/* Xcvr set to fixed speed/duplex. */
+		rx_flowctrl:1,
+		tx_flowctrl:1;					/* Use 802.3x flow control. */
+	unsigned int default_port:4;		/* Last dev->if_port value. */
 	u32 tx_mode;
 	u8 tx_threshold;
-	/* MII transceiver section. */
-	u16 advertising;		/* NWay media advertisement */
-	int phy_cnt;			/* MII device addresses. */
-	unsigned char phys[PHY_CNT];	/* MII device addresses. */
-};
-
+	u32 cur_rx_mode;
+	u16 mc_filter[32];
+	int multicast_filter_limit;
 
-static int	mdio_read(struct net_device *dev, int phy_id, int location);
-static void	mdio_write(struct net_device *dev, int phy_id, int location, int value);
-static int	netdev_open(struct net_device *dev);
-static void	check_duplex(struct net_device *dev);
-static void	tx_timeout(struct net_device *dev);
-static void	init_ring(struct net_device *dev);
-static int	start_tx(struct sk_buff *skb, struct net_device *dev);
-static void	intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
-static void	netdev_error(struct net_device *dev, int intr_status);
-static int	netdev_rx(struct net_device *dev);
-static void	netdev_error(struct net_device *dev, int intr_status);
-static void	set_rx_mode(struct net_device *dev);
+	/* MII transceiver section. */
+	int mii_cnt;						/* MII device addresses. */
+	u16 advertising;					/* NWay media advertisement */
+	unsigned char phys[2];				/* MII device addresses. */
+};
+
+static int  mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location,
+					   int value);
+static int  netdev_open(struct net_device *dev);
+static int  change_mtu(struct net_device *dev, int new_mtu);
+static void check_duplex(struct net_device *dev);
+static void netdev_timer(unsigned long data);
+static void tx_timeout(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static int  start_tx(struct sk_buff *skb, struct net_device *dev);
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static void netdev_error(struct net_device *dev, int intr_status);
+static int  netdev_rx(struct net_device *dev);
+static void netdev_error(struct net_device *dev, int intr_status);
+static void set_rx_mode(struct net_device *dev);
 static struct net_device_stats *get_stats(struct net_device *dev);
-static int	netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static int	netdev_close(struct net_device *dev);
-static void	netdev_media_change(struct net_device *dev);
-
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int  netdev_close(struct net_device *dev);
 
+
 
-static int __devinit starfire_init_one(struct pci_dev *pdev,
-				       const struct pci_device_id *ent)
-{
-	struct netdev_private *np;
-	int i, irq, option, chip_idx = ent->driver_data;
-	struct net_device *dev;
-	static int card_idx = -1;
-	long ioaddr;
-	int drv_flags, io_size;
-	int boguscnt;
-#ifndef HAVE_PCI_SET_MWI
-	u16 cmd;
-	u8 cache;
-#endif
+/* A list of our installed devices, for removing the driver module. */
+static struct net_device *root_net_dev = NULL;
 
-/* when built into the kernel, we only print version if device is found */
 #ifndef MODULE
-	static int printed_version;
-	if (!printed_version++)
-		printk(version);
-#endif
-
-	card_idx++;
-
-	if (pci_enable_device (pdev))
-		return -EIO;
-
-	ioaddr = pci_resource_start(pdev, 0);
-	io_size = pci_resource_len(pdev, 0);
-	if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
-		printk (KERN_ERR DRV_NAME " %d: no PCI MEM resources, aborting\n", card_idx);
+int starfire_probe(struct net_device *dev)
+{
+	if (pci_drv_register(&starfire_drv_id, dev) < 0)
 		return -ENODEV;
-	}
-
-	dev = alloc_etherdev(sizeof(*np));
-	if (!dev) {
-		printk (KERN_ERR DRV_NAME " %d: cannot alloc etherdev, aborting\n", card_idx);
-		return -ENOMEM;
-	}
-	SET_MODULE_OWNER(dev);
-
-	irq = pdev->irq;
-
-	if (pci_request_regions (pdev, dev->name)) {
-		printk (KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", card_idx);
-		goto err_out_free_netdev;
-	}
-
-	/* ioremap is borken in Linux-2.2.x/sparc64 */
-#if !defined(CONFIG_SPARC64) || LINUX_VERSION_CODE > 0x20300
-	ioaddr = (long) ioremap(ioaddr, io_size);
-	if (!ioaddr) {
-		printk (KERN_ERR DRV_NAME " %d: cannot remap 0x%x @ 0x%lx, aborting\n",
-			card_idx, io_size, ioaddr);
-		goto err_out_free_res;
-	}
-#endif /* !CONFIG_SPARC64 || Linux 2.3.0+ */
+	printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+	return 0;
+}
+#endif
 
-	pci_set_master(pdev);
+static void *starfire_probe1(struct pci_dev *pdev, void *init_dev,
+							 long ioaddr, int irq, int chip_idx, int card_idx)
+{
+	struct net_device *dev;
+	struct netdev_private *np;
+	void *priv_mem;
+	int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
 
-#ifdef HAVE_PCI_SET_MWI
-	pci_set_mwi(pdev);
-#else
-	/* enable MWI -- it vastly improves Rx performance on sparc64 */
-	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
-	cmd |= PCI_COMMAND_INVALIDATE;
-	pci_write_config_word(pdev, PCI_COMMAND, cmd);
-
-	/* set PCI cache size */
-	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
-	if ((cache << 2) != SMP_CACHE_BYTES) {
-		printk(KERN_INFO "  PCI cache line size set incorrectly "
-		       "(%i bytes) by BIOS/FW, correcting to %i\n",
-		       (cache << 2), SMP_CACHE_BYTES);
-		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
-				      SMP_CACHE_BYTES >> 2);
-	}
-#endif
+	dev = init_etherdev(init_dev, 0);
+	if (!dev)
+		return NULL;
 
-#ifdef ZEROCOPY
-	/* Starfire can do SG and TCP/UDP checksumming */
-	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
-#endif /* ZEROCOPY */
+	printk(KERN_INFO "%s: %s at 0x%lx, ",
+		   dev->name, pci_id_tbl[chip_idx].name, ioaddr);
 
 	/* Serial EEPROM reads are hidden by the hardware. */
 	for (i = 0; i < 6; i++)
-		dev->dev_addr[i] = readb(ioaddr + EEPROMCtrl + 20 - i);
-
-#if ! defined(final_version) /* Dump the EEPROM contents during development. */
-	if (debug > 4)
-		for (i = 0; i < 0x20; i++)
-			printk("%2.2x%s",
-			       (unsigned int)readb(ioaddr + EEPROMCtrl + i),
-			       i % 16 != 15 ? " " : "\n");
-#endif
+		dev->dev_addr[i] = readb(ioaddr + EEPROMCtrl + 20-i);
+	for (i = 0; i < 5; i++)
+		printk("%2.2x:", dev->dev_addr[i]);
+	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
 
-	/* Issue soft reset */
-	writel(0x8000, ioaddr + TxMode);
-	udelay(1000);
-	writel(0, ioaddr + TxMode);
+	/* Make certain elements e.g. descriptor lists are aligned. */
+	priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
+	/* Check for the very unlikely case of no memory. */
+	if (priv_mem == NULL)
+		return NULL;
 
 	/* Reset the chip to erase previous misconfiguration. */
-	writel(1, ioaddr + PCIDeviceConfig);
-	boguscnt = 1000;
-	while (--boguscnt > 0) {
-		udelay(10);
-		if ((readl(ioaddr + PCIDeviceConfig) & 1) == 0)
-			break;
-	}
-	if (boguscnt == 0)
-		printk("%s: chipset reset never completed!\n", dev->name);
-	/* wait a little longer */
-	udelay(1000);
+	writel(ChipResetCmd, ioaddr + PCIDeviceConfig);
 
 	dev->base_addr = ioaddr;
 	dev->irq = irq;
 
-	np = dev->priv;
-	pci_set_drvdata(pdev, dev);
+	dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+	memset(np, 0, sizeof(*np));
+	np->priv_addr = priv_mem;
+
+	np->next_module = root_net_dev;
+	root_net_dev = dev;
 
 	np->pci_dev = pdev;
-	drv_flags = netdrv_tbl[chip_idx].drv_flags;
+	np->chip_id = chip_idx;
+	np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
+	np->msg_level = (1 << debug) - 1;
+	np->rx_copybreak = rx_copybreak;
+	np->max_interrupt_work = max_interrupt_work;
+	np->multicast_filter_limit = multicast_filter_limit;
 
-	option = card_idx < MAX_UNITS ? options[card_idx] : 0;
 	if (dev->mem_start)
 		option = dev->mem_start;
 
-	/* The lower four bits are the media type. */
-	if (option & 0x200)
+	if (card_idx < MAX_UNITS  &&  full_duplex[card_idx] > 0)
 		np->full_duplex = 1;
 
-	if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
-		np->full_duplex = 1;
-
-	if (np->full_duplex)
-		np->autoneg = 0;
-	else
-		np->autoneg = 1;
-	np->speed100 = 1;
+	if (np->full_duplex) {
+		if (np->msg_level & NETIF_MSG_PROBE)
+			printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
+				   " disabled.\n", dev->name);
+		np->medialock = 1;
+	}
 
 	/* The chip-specific entries in the device structure. */
 	dev->open = &netdev_open;
 	dev->hard_start_xmit = &start_tx;
-	init_tx_timer(dev, tx_timeout, TX_TIMEOUT);
 	dev->stop = &netdev_close;
 	dev->get_stats = &get_stats;
 	dev->set_multicast_list = &set_rx_mode;
-	dev->do_ioctl = &netdev_ioctl;
-
-	if (mtu)
-		dev->mtu = mtu;
-
-	i = register_netdev(dev);
-	if (i)
-		goto err_out_cleardev;
-
-	printk(KERN_INFO "%s: %s at 0x%lx, ",
-		   dev->name, netdrv_tbl[chip_idx].name, ioaddr);
-	for (i = 0; i < 5; i++)
-		printk("%2.2x:", dev->dev_addr[i]);
-	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+	dev->do_ioctl = &mii_ioctl;
+	dev->change_mtu = &change_mtu;
 
-	if (drv_flags & CanHaveMII) {
+	if (np->drv_flags & CanHaveMII) {
 		int phy, phy_idx = 0;
-		int mii_status;
-		for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
-			mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
-			mdelay(100);
-			boguscnt = 1000;
-			while (--boguscnt > 0)
-				if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
-					break;
-			if (boguscnt == 0) {
-				printk("%s: PHY reset never completed!\n", dev->name);
-				continue;
-			}
-			mii_status = mdio_read(dev, phy, MII_BMSR);
-			if (mii_status != 0) {
+		for (phy = 0; phy < 32 && phy_idx < 4; phy++) {
+			int mii_status = mdio_read(dev, phy, 1);
+			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
 				np->phys[phy_idx++] = phy;
-				np->advertising = mdio_read(dev, phy, MII_ADVERTISE);
-				printk(KERN_INFO "%s: MII PHY found at address %d, status "
-					   "0x%4.4x advertising %4.4x.\n",
-					   dev->name, phy, mii_status, np->advertising);
-				/* there can be only one PHY on-board */
-				break;
+				np->advertising = mdio_read(dev, phy, 4);
+				if (np->msg_level & NETIF_MSG_PROBE)
+					printk(KERN_INFO "%s: MII PHY found at address %d, status "
+						   "0x%4.4x advertising %4.4x.\n",
+						   dev->name, phy, mii_status, np->advertising);
 			}
 		}
-		np->phy_cnt = phy_idx;
+		np->mii_cnt = phy_idx;
 	}
 
-#ifdef ZEROCOPY
-	printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming enabled.\n",
-	       dev->name);
-#else  /* not ZEROCOPY */
-	printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming disabled.\n",
-	       dev->name);
-#endif /* not ZEROCOPY */
-
-	return 0;
+	/* Force the media type after detecting the transceiver. */
+	if (option > 0) {
+		if (option & 0x220)
+			np->full_duplex = 1;
+		np->default_port = option & 0x3ff;
+		if (np->default_port & 0x330) {
+			np->medialock = 1;
+			if (np->msg_level & NETIF_MSG_PROBE)
+				printk(KERN_INFO "  Forcing %dMbs %s-duplex operation.\n",
+					   (option & 0x300 ? 100 : 10),
+					   (np->full_duplex ? "full" : "half"));
+			mdio_write(dev, np->phys[0], 0,
+					   ((option & 0x300) ? 0x2000 : 0) | 	/* 100mbps? */
+					   (np->full_duplex ? 0x0100 : 0)); /* Full duplex? */
+		}
+	}
 
-err_out_cleardev:
-	pci_set_drvdata(pdev, NULL);
-	iounmap((void *)ioaddr);
-err_out_free_res:
-	pci_release_regions (pdev);
-err_out_free_netdev:
-	unregister_netdev(dev);
-	kfree(dev);
-	return -ENODEV;
+	return dev;
 }
 
-
+
 /* Read the MII Management Data I/O (MDIO) interfaces. */
+
 static int mdio_read(struct net_device *dev, int phy_id, int location)
 {
 	long mdio_addr = dev->base_addr + MIICtrl + (phy_id<<7) + (location<<2);
@@ -825,15 +572,10 @@
 	/* ??? Should we add a busy-wait here? */
 	do
 		result = readl(mdio_addr);
-	while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
-	if (boguscnt == 0)
-		return 0;
-	if ((result & 0xffff) == 0xffff)
-		return 0;
+	while ((result & 0xC0000000) != 0x80000000 && --boguscnt >= 0);
 	return result & 0xffff;
 }
 
-
 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
 {
 	long mdio_addr = dev->base_addr + MIICtrl + (phy_id<<7) + (location<<2);
@@ -842,121 +584,74 @@
 	return;
 }
 
-
+
 static int netdev_open(struct net_device *dev)
 {
-	struct netdev_private *np = dev->priv;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
 	long ioaddr = dev->base_addr;
-	int i, retval;
-
-	/* Do we ever need to reset the chip??? */
+	int i;
 
-	COMPAT_MOD_INC_USE_COUNT;
+	MOD_INC_USE_COUNT;
 
-	retval = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
-	if (retval) {
-		COMPAT_MOD_DEC_USE_COUNT;
-		return retval;
+	if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
+		MOD_DEC_USE_COUNT;
+		return -EAGAIN;
 	}
 
+	/* We have no reports that indicate we need to reset the chip.
+	   But to be on the safe side... */
 	/* Disable the Rx and Tx, and reset the chip. */
 	writel(0, ioaddr + GenCtrl);
-	writel(1, ioaddr + PCIDeviceConfig);
-	if (debug > 1)
+	writel(ChipResetCmd, ioaddr + PCIDeviceConfig);
+	if (np->msg_level & NETIF_MSG_IFUP)
 		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
-		       dev->name, dev->irq);
+			   dev->name, dev->irq);
 	/* Allocate the various queues, failing gracefully. */
 	if (np->tx_done_q == 0)
-		np->tx_done_q = pci_alloc_consistent(np->pci_dev, PAGE_SIZE, &np->tx_done_q_dma);
+		np->tx_done_q = (struct tx_done_report *)get_free_page(GFP_KERNEL);
 	if (np->rx_done_q == 0)
-		np->rx_done_q = pci_alloc_consistent(np->pci_dev, sizeof(struct rx_done_desc) * DONE_Q_SIZE, &np->rx_done_q_dma);
+		np->rx_done_q = (struct rx_done_desc *)get_free_page(GFP_KERNEL);
 	if (np->tx_ring == 0)
-		np->tx_ring = pci_alloc_consistent(np->pci_dev, PAGE_SIZE, &np->tx_ring_dma);
+		np->tx_ring = (struct starfire_tx_desc *)get_free_page(GFP_KERNEL);
 	if (np->rx_ring == 0)
-		np->rx_ring = pci_alloc_consistent(np->pci_dev, PAGE_SIZE, &np->rx_ring_dma);
-	if (np->tx_done_q == 0 || np->rx_done_q == 0
-		|| np->rx_ring == 0 || np->tx_ring == 0) {
-		if (np->tx_done_q)
-			pci_free_consistent(np->pci_dev, PAGE_SIZE,
-					    np->tx_done_q, np->tx_done_q_dma);
-		if (np->rx_done_q)
-			pci_free_consistent(np->pci_dev, sizeof(struct rx_done_desc) * DONE_Q_SIZE,
-					    np->rx_done_q, np->rx_done_q_dma);
-		if (np->tx_ring)
-			pci_free_consistent(np->pci_dev, PAGE_SIZE,
-					    np->tx_ring, np->tx_ring_dma);
-		if (np->rx_ring)
-			pci_free_consistent(np->pci_dev, PAGE_SIZE,
-					    np->rx_ring, np->rx_ring_dma);
-		COMPAT_MOD_DEC_USE_COUNT;
+		np->rx_ring = (struct starfire_rx_desc *)get_free_page(GFP_KERNEL);
+	if (np->tx_done_q == 0  ||  np->rx_done_q == 0
+		|| np->rx_ring == 0 ||  np->tx_ring == 0) {
+		/* Retain the pages to increase our chances next time. */
+		MOD_DEC_USE_COUNT;
 		return -ENOMEM;
 	}
 
 	init_ring(dev);
 	/* Set the size of the Rx buffers. */
-	writel((np->rx_buf_sz << RxBufferLenShift) |
-	       (0 << RxMinDescrThreshShift) |
-	       RxPrefetchMode | RxVariableQ |
-	       RxDescSpace4,
-	       ioaddr + RxDescQCtrl);
-
-#ifdef ZEROCOPY
-	/* Set Tx descriptor to type 0 and spacing to 64 bytes. */
-	writel((2 << TxHiPriFIFOThreshShift) |
-	       (0 << TxPadLenShift) |
-	       (4 << TxDMABurstSizeShift) |
-	       TxDescSpace64 | TxDescType0,
-	       ioaddr + TxDescCtrl);
-#else  /* not ZEROCOPY */
+	writel((np->rx_buf_sz<<16) | 0xA000, ioaddr + RxDescQCtrl);
+
 	/* Set Tx descriptor to type 1 and padding to 0 bytes. */
-	writel((2 << TxHiPriFIFOThreshShift) |
-	       (0 << TxPadLenShift) |
-	       (4 << TxDMABurstSizeShift) |
-	       TxDescSpaceUnlim | TxDescType1,
-	       ioaddr + TxDescCtrl);
-#endif /* not ZEROCOPY */
-
-#if defined(ADDR_64BITS) && defined(__alpha__)
-	/* XXX We really need a 64-bit PCI dma interfaces too... -DaveM */
-	writel(np->rx_ring_dma >> 32, ioaddr + RxDescQHiAddr);
-	writel(np->tx_ring_dma >> 32, ioaddr + TxRingHiAddr);
+	writel(0x02000401, ioaddr + TxDescCtrl);
+
+#if defined(STARFIRE_ADDR_64BITS)
+	writel(virt_to_bus(np->rx_ring) >> 32, ioaddr + RxDescQHiAddr);
+	writel(virt_to_bus(np->tx_ring) >> 32, ioaddr + TxRingHiAddr);
 #else
 	writel(0, ioaddr + RxDescQHiAddr);
 	writel(0, ioaddr + TxRingHiAddr);
 	writel(0, ioaddr + CompletionHiAddr);
 #endif
-	writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
-	writel(np->tx_ring_dma, ioaddr + TxRingPtr);
+	writel(virt_to_bus(np->rx_ring), ioaddr + RxDescQAddr);
+	writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
 
-	writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
-#ifdef full_rx_status
-	writel(np->rx_done_q_dma |
-	       RxComplType3 |
-	       (0 << RxComplThreshShift),
-	       ioaddr + RxCompletionAddr);
-#else  /* not full_rx_status */
-#ifdef csum_rx_status
-	writel(np->rx_done_q_dma |
-	       RxComplType2 |
-	       (0 << RxComplThreshShift),
-	       ioaddr + RxCompletionAddr);
-#else  /* not csum_rx_status */
-	writel(np->rx_done_q_dma |
-	       RxComplType0 |
-	       (0 << RxComplThreshShift),
-	       ioaddr + RxCompletionAddr);
-#endif /* not csum_rx_status */
-#endif /* not full_rx_status */
+	writel(virt_to_bus(np->tx_done_q), ioaddr + TxCompletionAddr);
+	writel(virt_to_bus(np->rx_done_q), ioaddr + RxCompletionAddr);
 
-	if (debug > 1)
-		printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);
+	if (np->msg_level & NETIF_MSG_IFUP)
+		printk(KERN_DEBUG "%s:  Filling in the station address.\n", dev->name);
 
 	/* Fill both the unused Tx SA register and the Rx perfect filter. */
 	for (i = 0; i < 6; i++)
-		writeb(dev->dev_addr[i], ioaddr + StationAddr + 5 - i);
+		writeb(dev->dev_addr[i], ioaddr + StationAddr + 5-i);
 	for (i = 0; i < 16; i++) {
 		u16 *eaddrs = (u16 *)dev->dev_addr;
-		long setup_frm = ioaddr + PerfFilterTable + i * 16;
+		long setup_frm = ioaddr + 0x56000 + i*16;
 		writew(cpu_to_be16(eaddrs[2]), setup_frm); setup_frm += 4;
 		writew(cpu_to_be16(eaddrs[1]), setup_frm); setup_frm += 4;
 		writew(cpu_to_be16(eaddrs[0]), setup_frm); setup_frm += 8;
@@ -964,143 +659,210 @@
 
 	/* Initialize other registers. */
 	/* Configure the PCI bus bursts and FIFO thresholds. */
-	np->tx_mode = 0x0C04;		/* modified when link is up. */
-	writel(0x8000 | np->tx_mode, ioaddr + TxMode);
-	udelay(1000);
-	writel(np->tx_mode, ioaddr + TxMode);
+	np->tx_mode = 0;			/* Initialized when TxMode set. */
 	np->tx_threshold = 4;
 	writel(np->tx_threshold, ioaddr + TxThreshold);
+	writel(interrupt_mitigation, ioaddr + IntrTimerCtrl);
 
-	interrupt_mitigation &= 0x1f;
-	np->intr_mitigation = interrupt_mitigation;
-	writel(np->intr_mitigation, ioaddr + IntrTimerCtrl);
+	if (dev->if_port == 0)
+		dev->if_port = np->default_port;
 
-	netif_start_if(dev);
-	netif_start_queue(dev);
-
-	if (debug > 1)
-		printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
+	if (np->msg_level & NETIF_MSG_IFUP)
+		printk(KERN_DEBUG "%s:  Setting the Rx and Tx modes.\n", dev->name);
 	set_rx_mode(dev);
 
-	np->advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
+	np->advertising = mdio_read(dev, np->phys[0], 4);
 	check_duplex(dev);
-
-	/* Enable GPIO interrupts on link change */
-	writel(0x0f00ff00, ioaddr + GPIOCtrl);
+	netif_start_tx_queue(dev);
 
 	/* Set the interrupt mask and enable PCI interrupts. */
-	writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
-	       IntrTxDone | IntrStatsMax | IntrLinkChange |
-	       IntrNormalSummary | IntrAbnormalSummary |
-	       IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
-	       ioaddr + IntrEnable);
-	writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
-	       ioaddr + PCIDeviceConfig);
-
-#ifdef HAS_FIRMWARE
-	/* Load Rx/Tx firmware into the frame processors */
-	for (i = 0; i < FIRMWARE_RX_SIZE * 2; i++)
-		writel(firmware_rx[i], ioaddr + RxGfpMem + i * 4);
-	for (i = 0; i < FIRMWARE_TX_SIZE * 2; i++)
-		writel(firmware_tx[i], ioaddr + TxGfpMem + i * 4);
-	/* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
-	writel(0x003F, ioaddr + GenCtrl);
-#else  /* not HAS_FIRMWARE */
-	/* Enable the Rx and Tx units only. */
-	writel(0x000F, ioaddr + GenCtrl);
-#endif /* not HAS_FIRMWARE */
+	np->intr_enable = IntrRxDone | IntrRxEmpty | IntrRxPCIErr |
+		IntrTxDone | IntrTxEmpty | IntrTxPCIErr |
+		StatsMax | LinkChange | IntrNormalSummary | IntrAbnormalSummary
+		| 0x0010;
+	writel(np->intr_enable, ioaddr + IntrEnable);
+	writel(PCIIntEnb | readl(ioaddr + PCIDeviceConfig),
+		   ioaddr + PCIDeviceConfig);
+
+	/* Enable the Rx and Tx units. */
+	writel(TxEnable|RxEnable, ioaddr + GenCtrl);
 
-	if (debug > 2)
+	if (np->msg_level & NETIF_MSG_IFUP)
 		printk(KERN_DEBUG "%s: Done netdev_open().\n",
-		       dev->name);
+			   dev->name);
+
+	/* Set the timer to check for link beat. */
+	init_timer(&np->timer);
+	np->timer.expires = jiffies + 3*HZ;
+	np->timer.data = (unsigned long)dev;
+	np->timer.function = &netdev_timer;				/* timer handler */
+	add_timer(&np->timer);
 
 	return 0;
 }
 
+/* The starfire can handle frame sizes up to 64KB, but we arbitrarily
+ * limit the size.
+ */
+static int change_mtu(struct net_device *dev, int new_mtu)
+{
+	if ((new_mtu < 68) || (new_mtu > 17268))
+		return -EINVAL;
+	if (netif_running(dev))
+		return -EBUSY;
+	dev->mtu = new_mtu;
+	return 0;
+}
 
 static void check_duplex(struct net_device *dev)
 {
-	struct netdev_private *np = dev->priv;
-	u16 reg0;
-
-	mdio_write(dev, np->phys[0], MII_ADVERTISE, np->advertising);
-	mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
-	udelay(500);
-	while (mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET);
-
-	reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int new_tx_mode;
 
-	if (np->autoneg) {
-		reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
-	} else {
-		reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
-		if (np->speed100)
-			reg0 |= BMCR_SPEED100;
+	new_tx_mode = 0x0C04 | (np->tx_flowctrl ? 0x0800:0)
+		| (np->rx_flowctrl ? 0x0400:0);
+	if (np->medialock) {
 		if (np->full_duplex)
-			reg0 |= BMCR_FULLDPLX;
-		printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
-		       dev->name,
-		       np->speed100 ? "100" : "10",
-		       np->full_duplex ? "full" : "half");
+			new_tx_mode |= 2;
+	} else {
+		int mii_reg5 = mdio_read(dev, np->phys[0], 5);
+		int negotiated = mii_reg5 & np->advertising;
+		int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
+		if (duplex)
+			new_tx_mode |= 2;
+		if (np->full_duplex != duplex) {
+			np->full_duplex = duplex;
+			if (np->msg_level & NETIF_MSG_LINK)
+				printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d"
+					   " negotiated capability %4.4x.\n", dev->name,
+					   duplex ? "full" : "half", np->phys[0], negotiated);
+		}
+	}
+	if (new_tx_mode != np->tx_mode) {
+		np->tx_mode = new_tx_mode;
+		writel(np->tx_mode | 0x8000, ioaddr + TxMode);
+		writel(np->tx_mode, ioaddr + TxMode);
 	}
-	mdio_write(dev, np->phys[0], MII_BMCR, reg0);
 }
 
+/* Check for duplex changes, but mostly check for failures. */
+static void netdev_timer(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int status = readl(ioaddr + IntrStatus);
+	static long last_msg = 0;
+
+	/* Normally we check only every few seconds. */
+	np->timer.expires = jiffies + 60*HZ;
+
+	if (np->msg_level & NETIF_MSG_TIMER) {
+		printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x.\n",
+			   dev->name, status);
+	}
+
+	/* Check for a missing chip or failed interrupt line.
+	 * The latter may be falsely triggered, so we check twice. */
+	if (status == 0xffffffff) {
+		if (jiffies - last_msg > 10*HZ) {
+			last_msg = jiffies;
+			printk(KERN_ERR "%s: The Starfire chip is missing!\n",
+				   dev->name);
+		}
+	} else if (np->polling) {
+		if (status & IntrPCIPin) {
+			intr_handler(dev->irq, dev, 0);
+			if (jiffies - last_msg > 10*HZ) {
+				printk(KERN_ERR "%s: IRQ %d is still blocked!\n",
+					   dev->name, dev->irq);
+				last_msg = jiffies;
+			}
+		} else if (jiffies - last_msg > 10*HZ)
+			np->polling = 0;
+		np->timer.expires = jiffies + 2;
+	} else if (status & IntrPCIPin) {
+		int new_status = readl(ioaddr + IntrStatus);
+		/* Bogus hardware IRQ mapping: Fake an interrupt handler call. */
+		if (new_status & IntrPCIPin) {
+			printk(KERN_ERR "%s: IRQ %d is not raising an interrupt! "
+				   "Status %8.8x/%8.8x.  \n",
+				   dev->name, dev->irq, status, new_status);
+			intr_handler(dev->irq, dev, 0);
+			np->timer.expires = jiffies + 2;
+			np->polling = 1;
+		}
+	} else if (netif_queue_paused(dev)  &&
+			   np->cur_tx - np->dirty_tx > 1  &&
+			   (jiffies - dev->trans_start) > TX_TIMEOUT) {
+		/* This will not catch tbusy incorrectly set when the queue is empty,
+		 * but that state should never occur. */
+		tx_timeout(dev);
+	}
+
+	check_duplex(dev);
+
+	add_timer(&np->timer);
+}
 
 static void tx_timeout(struct net_device *dev)
 {
-	struct netdev_private *np = dev->priv;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
 	long ioaddr = dev->base_addr;
 
 	printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
-	       " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));
+		   " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));
 
-#ifndef __alpha__
-	{
+#if defined(__i386__)
+	if (np->msg_level & NETIF_MSG_TX_ERR) {
 		int i;
-		printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
-		for (i = 0; i < RX_RING_SIZE; i++)
-			printk(" %8.8x", (unsigned int)le32_to_cpu(np->rx_ring[i].rxaddr));
-		printk("\n"KERN_DEBUG"  Tx ring %p: ", np->tx_ring);
+		printk("\n" KERN_DEBUG "  Tx ring %p: ", np->tx_ring);
 		for (i = 0; i < TX_RING_SIZE; i++)
-			printk(" %4.4x", le32_to_cpu(np->tx_ring[i].status));
+			printk(" %4.4x", np->tx_ring[i].status);
+		printk("\n" KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
+		for (i = 0; i < RX_RING_SIZE; i++)
+			printk(" %8.8x", (unsigned int)np->rx_ring[i].rxaddr);
 		printk("\n");
 	}
 #endif
 
-	/* Perhaps we should reinitialize the hardware here. */
+	/* If a specific problem is reported, reinitialize the hardware here. */
+	dev->if_port = 0;
 	/* Stop and restart the chip's Tx processes . */
-
-	/* Trigger an immediate transmit demand. */
+	writel(0, ioaddr + GenCtrl);
+	/* Enable the Rx and Tx units. */
+	writel(TxEnable|RxEnable, ioaddr + GenCtrl);
 
 	dev->trans_start = jiffies;
 	np->stats.tx_errors++;
-	netif_wake_queue(dev);
+	return;
 }
 
 
 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
 static void init_ring(struct net_device *dev)
 {
-	struct netdev_private *np = dev->priv;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
 	int i;
 
 	np->tx_full = 0;
 	np->cur_rx = np->cur_tx = 0;
 	np->dirty_rx = np->rx_done = np->dirty_tx = np->tx_done = 0;
 
-	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
+	np->rx_buf_sz = (dev->mtu <= 1522 ? PKT_BUF_SZ :
+					 (dev->mtu + 14 + 3) & ~3);	/* Round to word. */
 
 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
 		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
-		np->rx_info[i].skb = skb;
+		np->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
-		np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
 		skb->dev = dev;			/* Mark as being used by this device. */
 		/* Grrr, we cannot offset to correctly align the IP header. */
-		np->rx_ring[i].rxaddr = cpu_to_le32(np->rx_info[i].mapping | RxDescValid);
+		np->rx_ring[i].rxaddr =
+			virt_to_le32desc(skb->tail) | cpu_to_le32(RxDescValid);
 	}
 	writew(i - 1, dev->base_addr + RxDescQIdx);
 	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
@@ -1108,8 +870,7 @@
 	/* Clear the remainder of the Rx buffer ring. */
 	for (  ; i < RX_RING_SIZE; i++) {
 		np->rx_ring[i].rxaddr = 0;
-		np->rx_info[i].skb = NULL;
-		np->rx_info[i].mapping = 0;
+		np->rx_skbuff[i] = 0;
 	}
 	/* Mark the last entry as wrapping the ring. */
 	np->rx_ring[i-1].rxaddr |= cpu_to_le32(RxDescEndRing);
@@ -1121,30 +882,25 @@
 	}
 
 	for (i = 0; i < TX_RING_SIZE; i++) {
-		np->tx_info[i].skb = NULL;
-		np->tx_info[i].first_mapping = 0;
-#ifdef ZEROCOPY
-		{
-			int j;
-			for (j = 0; j < MAX_STARFIRE_FRAGS; j++)
-				np->tx_info[i].frag_mapping[j] = 0;
-		}
-#endif /* ZEROCOPY */
+		np->tx_skbuff[i] = 0;
 		np->tx_ring[i].status = 0;
 	}
 	return;
 }
 
-
 static int start_tx(struct sk_buff *skb, struct net_device *dev)
 {
-	struct netdev_private *np = dev->priv;
-	unsigned int entry;
-#ifdef ZEROCOPY
-	int i;
-#endif
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	unsigned entry;
 
-	kick_tx_timer(dev, tx_timeout, TX_TIMEOUT);
+	/* Block a timer-based transmit from overlapping.  This happens when
+	   packets are presumed lost, and we use this to check the Tx status. */
+	if (netif_pause_tx_queue(dev) != 0) {
+		/* This watchdog code is redundant with the media monitor timer. */
+		if (jiffies - dev->trans_start > TX_TIMEOUT)
+			tx_timeout(dev);
+		return 1;
+	}
 
 	/* Caution: the write order is important here, set the field
 	   with the "ownership" bits last. */
@@ -1152,109 +908,48 @@
 	/* Calculate the next Tx descriptor entry. */
 	entry = np->cur_tx % TX_RING_SIZE;
 
-#if defined(ZEROCOPY) && defined(HAS_FIRMWARE) && defined(HAS_BROKEN_FIRMWARE)
-	{
-		int has_bad_length = 0;
-
-		if (skb_first_frag_len(skb) == 1)
-			has_bad_length = 1;
-		else {
-			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-				if (skb_shinfo(skb)->frags[i].size == 1) {
-					has_bad_length = 1;
-					break;
-				}
-		}
-
-		if (has_bad_length)
-			skb_checksum_help(skb);
-	}
-#endif /* ZEROCOPY && HAS_FIRMWARE && HAS_BROKEN_FIRMWARE */
-
-	np->tx_info[entry].skb = skb;
-	np->tx_info[entry].first_mapping =
-		pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
-
-	np->tx_ring[entry].first_addr = cpu_to_le32(np->tx_info[entry].first_mapping);
-#ifdef ZEROCOPY
-	np->tx_ring[entry].first_len = cpu_to_le16(skb_first_frag_len(skb));
-	np->tx_ring[entry].total_len = cpu_to_le16(skb->len);
-	/* Add "| TxDescIntr" to generate Tx-done interrupts. */
-	np->tx_ring[entry].status = cpu_to_le32(TxDescID | TxCRCEn);
-	np->tx_ring[entry].nbufs = cpu_to_le32(skb_shinfo(skb)->nr_frags + 1);
-#else  /* not ZEROCOPY */
-	/* Add "| TxDescIntr" to generate Tx-done interrupts. */
-	np->tx_ring[entry].status = cpu_to_le32(skb->len | TxDescID | TxCRCEn | 1 << 16);
-#endif /* not ZEROCOPY */
+	np->tx_skbuff[entry] = skb;
 
-	if (entry >= TX_RING_SIZE-1)		 /* Wrap ring */
+	np->tx_ring[entry].addr = virt_to_le32desc(skb->data);
+	/* Add  "| TxDescIntr" to generate Tx-done interrupts. */
+	np->tx_ring[entry].status = cpu_to_le32(skb->len | TxDescID);
+#if 1
+	if (entry >= TX_RING_SIZE-1) {		 /* Wrap ring */
 		np->tx_ring[entry].status |= cpu_to_le32(TxRingWrap | TxDescIntr);
-
-#ifdef ZEROCOPY
-	if (skb->ip_summed == CHECKSUM_HW) {
-		np->tx_ring[entry].status |= cpu_to_le32(TxCalTCP);
-		np->stats.tx_compressed++;
-	}
-#endif /* ZEROCOPY */
-
-	if (debug > 5) {
-#ifdef ZEROCOPY
-		printk(KERN_DEBUG "%s: Tx #%d slot %d status %8.8x nbufs %d len %4.4x/%4.4x.\n",
-		       dev->name, np->cur_tx, entry,
-		       le32_to_cpu(np->tx_ring[entry].status),
-		       le32_to_cpu(np->tx_ring[entry].nbufs),
-		       le32_to_cpu(np->tx_ring[entry].first_len),
-		       le32_to_cpu(np->tx_ring[entry].total_len));
-#else  /* not ZEROCOPY */
-		printk(KERN_DEBUG "%s: Tx #%d slot %d status %8.8x.\n",
-		       dev->name, np->cur_tx, entry,
-		       le32_to_cpu(np->tx_ring[entry].status));
-#endif /* not ZEROCOPY */
-	}
-
-#ifdef ZEROCOPY
-	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i];
-
-		/* we already have the proper value in entry */
-		np->tx_info[entry].frag_mapping[i] =
-			pci_map_single(np->pci_dev, page_address(this_frag->page) + this_frag->page_offset, this_frag->size, PCI_DMA_TODEVICE);
-
-		np->tx_ring[entry].frag[i].addr = cpu_to_le32(np->tx_info[entry].frag_mapping[i]);
-		np->tx_ring[entry].frag[i].len = cpu_to_le32(this_frag->size);
-		if (debug > 5) {
-			printk(KERN_DEBUG "%s: Tx #%d frag %d len %4.4x.\n",
-			       dev->name, np->cur_tx, i,
-			       le32_to_cpu(np->tx_ring[entry].frag[i].len));
-		}
-	}
-#endif /* ZEROCOPY */
-
-	np->cur_tx++;
-
-	if (entry >= TX_RING_SIZE-1)		 /* Wrap ring */
 		entry = -1;
-	entry++;
+	}
+#endif
 
-	/* Non-x86: explicitly flush descriptor cache lines here. */
-	/* Ensure everything is written back above before the transmit is
-	   initiated. - Jes */
-	wmb();
+	/* On some architectures better performance results by explicitly
+	   flushing cache lines: pci_flush_virt(skb->data, skb->len); */
 
+	np->cur_tx++;
 	/* Update the producer index. */
-	writel(entry * (sizeof(struct starfire_tx_desc) / 8), dev->base_addr + TxProducerIdx);
+	writel(++entry, dev->base_addr + TxProducerIdx);
 
+	/* cf. using TX_QUEUE_LEN instead of TX_RING_SIZE here. */
 	if (np->cur_tx - np->dirty_tx >= TX_RING_SIZE - 1) {
 		np->tx_full = 1;
-		netif_stop_queue(dev);
-	}
+		/* Check for the rare case of a just-cleared queue. */
+		if (np->cur_tx - (volatile unsigned int)np->dirty_tx
+			< TX_RING_SIZE - 2) {
+			np->tx_full = 0;
+			netif_unpause_tx_queue(dev);
+		} else
+			netif_stop_tx_queue(dev);
+	} else
+		netif_unpause_tx_queue(dev);		/* Typical path */
 
 	dev->trans_start = jiffies;
 
+	if (np->msg_level & NETIF_MSG_TX_QUEUED) {
+		printk(KERN_DEBUG "%s: Tx frame #%d slot %d  %8.8x %8.8x.\n",
+			   dev->name, np->cur_tx, entry,
+			   np->tx_ring[entry].status, np->tx_ring[entry].addr);
+	}
 	return 0;
 }
 
-
 /* The interrupt handler does all of the Rx thread work and cleans up
    after the Tx thread. */
 static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
@@ -1262,28 +957,28 @@
 	struct net_device *dev = (struct net_device *)dev_instance;
 	struct netdev_private *np;
 	long ioaddr;
-	int boguscnt = max_interrupt_work;
-	int consumer;
-	int tx_status;
+	int boguscnt;
 
 #ifndef final_version			/* Can never occur. */
 	if (dev == NULL) {
-		printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown device.\n", irq);
+		printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown "
+				"device.\n", irq);
 		return;
 	}
 #endif
 
 	ioaddr = dev->base_addr;
-	np = dev->priv;
+	np = (struct netdev_private *)dev->priv;
+	boguscnt = np->max_interrupt_work;
 
 	do {
 		u32 intr_status = readl(ioaddr + IntrClear);
 
-		if (debug > 4)
+		if (np->msg_level & NETIF_MSG_INTR)
 			printk(KERN_DEBUG "%s: Interrupt status %4.4x.\n",
-			       dev->name, intr_status);
+				   dev->name, intr_status);
 
-		if (intr_status == 0)
+		if (intr_status == 0 || intr_status == 0xffffffff)
 			break;
 
 		if (intr_status & IntrRxDone)
@@ -1292,208 +987,144 @@
 		/* Scavenge the skbuff list based on the Tx-done queue.
 		   There are redundant checks here that may be cleaned up
 		   after the driver has proven to be reliable. */
-		consumer = readl(ioaddr + TxConsumerIdx);
-		if (debug > 4)
-			printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
-			       dev->name, consumer);
+		{
+			int consumer = readl(ioaddr + TxConsumerIdx);
+			int tx_status;
+			if (np->msg_level & NETIF_MSG_INTR)
+				printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
+					   dev->name, consumer);
 #if 0
-		if (np->tx_done >= 250 || np->tx_done == 0)
-			printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x, %d is %8.8x.\n",
-			       dev->name, np->tx_done,
-			       le32_to_cpu(np->tx_done_q[np->tx_done].status),
-			       (np->tx_done+1) & (DONE_Q_SIZE-1),
-			       le32_to_cpu(np->tx_done_q[(np->tx_done+1)&(DONE_Q_SIZE-1)].status));
-#endif
-
-		while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
-			if (debug > 4)
-				printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x.\n",
-				       dev->name, np->tx_done, tx_status);
-			if ((tx_status & 0xe0000000) == 0xa0000000) {
-				np->stats.tx_packets++;
-			} else if ((tx_status & 0xe0000000) == 0x80000000) {
-				struct sk_buff *skb;
-#ifdef ZEROCOPY
-				int i;
-#endif /* ZEROCOPY */
-				u16 entry = tx_status;		/* Implicit truncate */
-				entry /= sizeof(struct starfire_tx_desc);
-
-				skb = np->tx_info[entry].skb;
-				np->tx_info[entry].skb = NULL;
-				pci_unmap_single(np->pci_dev,
-						 np->tx_info[entry].first_mapping,
-						 skb_first_frag_len(skb),
-						 PCI_DMA_TODEVICE);
-				np->tx_info[entry].first_mapping = 0;
-
-#ifdef ZEROCOPY
-				for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-					pci_unmap_single(np->pci_dev,
-							 np->tx_info[entry].frag_mapping[i],
-							 skb_shinfo(skb)->frags[i].size,
-							 PCI_DMA_TODEVICE);
-					np->tx_info[entry].frag_mapping[i] = 0;
+			if (np->tx_done >= 250  || np->tx_done == 0)
+				printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x, "
+					   "%d is %8.8x.\n", dev->name,
+					   np->tx_done, np->tx_done_q[np->tx_done].status,
+					   (np->tx_done+1) & (DONE_Q_SIZE-1),
+					   np->tx_done_q[(np->tx_done+1)&(DONE_Q_SIZE-1)].status);
+#endif
+			while ((tx_status = cpu_to_le32(np->tx_done_q[np->tx_done].status))
+				   != 0) {
+				if (np->msg_level & NETIF_MSG_TX_DONE)
+					printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x.\n",
+						   dev->name, np->tx_done, tx_status);
+				if ((tx_status & 0xe0000000) == 0xa0000000) {
+					np->stats.tx_packets++;
+				} else if ((tx_status & 0xe0000000) == 0x80000000) {
+					u16 entry = tx_status; 		/* Implicit truncate */
+					entry >>= 3;
+					/* Scavenge the descriptor. */
+					if (np->tx_skbuff[entry]) {
+						dev_free_skb_irq(np->tx_skbuff[entry]);
+					} else
+						printk(KERN_WARNING "%s: Null skbuff at entry %d!!!\n",
+							   dev->name, entry);
+					np->tx_skbuff[entry] = 0;
+					np->dirty_tx++;
 				}
-#endif /* ZEROCOPY */
-
-				/* Scavenge the descriptor. */
-				dev_kfree_skb_irq(skb);
-
-				np->dirty_tx++;
+				np->tx_done_q[np->tx_done].status = 0;
+				np->tx_done = (np->tx_done+1) & (DONE_Q_SIZE-1);
 			}
-			np->tx_done_q[np->tx_done].status = 0;
-			np->tx_done = (np->tx_done+1) & (DONE_Q_SIZE-1);
+			writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
 		}
-		writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
-
 		if (np->tx_full && np->cur_tx - np->dirty_tx < TX_RING_SIZE - 4) {
-			/* The ring is no longer full, wake the queue. */
+			/* The ring is no longer full, allow new TX entries. */
 			np->tx_full = 0;
-			netif_wake_queue(dev);
-		}
-
-		/* Stats overflow */
-		if (intr_status & IntrStatsMax) {
-			get_stats(dev);
+			netif_resume_tx_queue(dev);
 		}
 
-		/* Media change interrupt. */
-		if (intr_status & IntrLinkChange)
-			netdev_media_change(dev);
-
 		/* Abnormal error summary/uncommon events handlers. */
 		if (intr_status & IntrAbnormalSummary)
 			netdev_error(dev, intr_status);
 
 		if (--boguscnt < 0) {
 			printk(KERN_WARNING "%s: Too much work at interrupt, "
-			       "status=0x%4.4x.\n",
-			       dev->name, intr_status);
+				   "status=0x%4.4x.\n",
+				   dev->name, intr_status);
+			writel(0x0021, ioaddr + IntrTimerCtrl);
 			break;
 		}
 	} while (1);
 
-	if (debug > 4)
+	if (np->msg_level & NETIF_MSG_INTR)
 		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
-		       dev->name, (int)readl(ioaddr + IntrStatus));
+			   dev->name, (int)readl(ioaddr + IntrStatus));
 
-#ifndef final_version
-	/* Code that should never be run!  Remove after testing.. */
-	{
-		static int stopit = 10;
-		if (!netif_running(dev) && --stopit < 0) {
-			printk(KERN_ERR "%s: Emergency stop, looping startup interrupt.\n",
-			       dev->name);
-			free_irq(irq, dev);
-		}
-	}
-#endif
+	return;
 }
 
-
 /* This routine is logically part of the interrupt handler, but separated
    for clarity and better register allocation. */
 static int netdev_rx(struct net_device *dev)
 {
-	struct netdev_private *np = dev->priv;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
 	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
 	u32 desc_status;
 
 	if (np->rx_done_q == 0) {
 		printk(KERN_ERR "%s:  rx_done_q is NULL!  rx_done is %d. %p.\n",
-		       dev->name, np->rx_done, np->tx_done_q);
+			   dev->name, np->rx_done, np->tx_done_q);
 		return 0;
 	}
 
 	/* If EOP is set on the next entry, it's a new packet. Send it up. */
 	while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
-		struct sk_buff *skb;
-		u16 pkt_len;
-		int entry;
-
-		if (debug > 4)
-			printk(KERN_DEBUG "  netdev_rx() status of %d was %8.8x.\n", np->rx_done, desc_status);
+		if (np->msg_level & NETIF_MSG_RX_STATUS)
+			printk(KERN_DEBUG "  netdev_rx() status of %d was %8.8x.\n",
+				   np->rx_done, desc_status);
 		if (--boguscnt < 0)
 			break;
 		if ( ! (desc_status & RxOK)) {
 			/* There was a error. */
-			if (debug > 2)
-				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n", desc_status);
+			if (np->msg_level & NETIF_MSG_RX_ERR)
+				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
+					   desc_status);
 			np->stats.rx_errors++;
 			if (desc_status & RxFIFOErr)
 				np->stats.rx_fifo_errors++;
-			goto next_rx;
-		}
-
-		pkt_len = desc_status;	/* Implicitly Truncate */
-		entry = (desc_status >> 16) & 0x7ff;
+		} else {
+			struct sk_buff *skb;
+			u16 pkt_len = desc_status;			/* Implicitly Truncate */
+			int entry = (desc_status >> 16) & 0x7ff;
 
 #ifndef final_version
-		if (debug > 4)
-			printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d, bogus_cnt %d.\n", pkt_len, boguscnt);
+			if (np->msg_level & NETIF_MSG_RX_STATUS)
+				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
+					   ", bogus_cnt %d.\n",
+					   pkt_len, boguscnt);
 #endif
-		/* Check if the packet is long enough to accept without copying
-		   to a minimally-sized skbuff. */
-		if (pkt_len < rx_copybreak
-		    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
-			skb->dev = dev;
-			skb_reserve(skb, 2);	/* 16 byte align the IP header */
-			pci_dma_sync_single(np->pci_dev,
-					    np->rx_info[entry].mapping,
-					    pkt_len, PCI_DMA_FROMDEVICE);
+			/* Check if the packet is long enough to accept without copying
+			   to a minimally-sized skbuff. */
+			if (pkt_len < rx_copybreak
+				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+				skb->dev = dev;
+				skb_reserve(skb, 2);	/* 16 byte align the IP header */
 #if HAS_IP_COPYSUM			/* Call copy + cksum if available. */
-			eth_copy_and_sum(skb, np->rx_info[entry].skb->tail, pkt_len, 0);
-			skb_put(skb, pkt_len);
+				eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+				skb_put(skb, pkt_len);
 #else
-			memcpy(skb_put(skb, pkt_len), np->rx_info[entry].skb->tail, pkt_len);
+				memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
+					   pkt_len);
 #endif
-		} else {
-			pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
-			skb = np->rx_info[entry].skb;
-			skb_put(skb, pkt_len);
-			np->rx_info[entry].skb = NULL;
-			np->rx_info[entry].mapping = 0;
-		}
-#ifndef final_version			/* Remove after testing. */
-		/* You will want this info for the initial debug. */
-		if (debug > 5)
-			printk(KERN_DEBUG "  Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
-			       "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
-			       "%d.%d.%d.%d.\n",
-			       skb->data[0], skb->data[1], skb->data[2], skb->data[3],
-			       skb->data[4], skb->data[5], skb->data[6], skb->data[7],
-			       skb->data[8], skb->data[9], skb->data[10],
-			       skb->data[11], skb->data[12], skb->data[13],
-			       skb->data[14], skb->data[15], skb->data[16],
-			       skb->data[17]);
-#endif
-		skb->protocol = eth_type_trans(skb, dev);
-#if defined(full_rx_status) || defined(csum_rx_status)
-		if (le32_to_cpu(np->rx_done_q[np->rx_done].status2) & 0x01000000) {
-			skb->ip_summed = CHECKSUM_UNNECESSARY;
-			np->stats.rx_compressed++;
-		}
-		/*
-		 * This feature doesn't seem to be working, at least
-		 * with the two firmware versions I have. If the GFP sees
-		 * a fragment, it either ignores it completely, or reports
-		 * "bad checksum" on it.
-		 *
-		 * Maybe I missed something -- corrections are welcome.
-		 * Until then, the printk stays. :-) -Ion
-		 */
-		else if (le32_to_cpu(np->rx_done_q[np->rx_done].status2) & 0x00400000) {
-			skb->ip_summed = CHECKSUM_HW;
-			skb->csum = le32_to_cpu(np->rx_done_q[np->rx_done].status2) & 0xffff;
-			printk(KERN_DEBUG "%s: checksum_hw, status2 = %x\n", dev->name, np->rx_done_q[np->rx_done].status2);
-		}
-#endif
-		netif_rx(skb);
-		dev->last_rx = jiffies;
-		np->stats.rx_packets++;
-
-next_rx:
+			} else {
+				char *temp = skb_put(skb = np->rx_skbuff[entry], pkt_len);
+				np->rx_skbuff[entry] = NULL;
+#ifndef final_version				/* Remove after testing. */
+				if (le32desc_to_virt(np->rx_ring[entry].rxaddr & ~3) != temp)
+					printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
+						   "do not match in netdev_rx: %p vs. %p / %p.\n",
+						   dev->name,
+						   le32desc_to_virt(np->rx_ring[entry].rxaddr),
+						   skb->head, temp);
+#endif
+			}
+			skb->protocol = eth_type_trans(skb, dev);
+#ifdef full_rx_status
+			if (np->rx_done_q[np->rx_done].status2 & cpu_to_le32(0x01000000))
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+#endif
+			netif_rx(skb);
+			dev->last_rx = jiffies;
+			np->stats.rx_packets++;
+		}
 		np->cur_rx++;
 		np->rx_done_q[np->rx_done].status = 0;
 		np->rx_done = (np->rx_done + 1) & (DONE_Q_SIZE-1);
@@ -1504,16 +1135,14 @@
 	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
 		struct sk_buff *skb;
 		int entry = np->dirty_rx % RX_RING_SIZE;
-		if (np->rx_info[entry].skb == NULL) {
+		if (np->rx_skbuff[entry] == NULL) {
 			skb = dev_alloc_skb(np->rx_buf_sz);
-			np->rx_info[entry].skb = skb;
+			np->rx_skbuff[entry] = skb;
 			if (skb == NULL)
-				break;	/* Better luck next round. */
-			np->rx_info[entry].mapping =
-				pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
-			skb->dev = dev;	/* Mark as being used by this device. */
+				break;				/* Better luck next round. */
+			skb->dev = dev;			/* Mark as being used by this device. */
 			np->rx_ring[entry].rxaddr =
-				cpu_to_le32(np->rx_info[entry].mapping | RxDescValid);
+				virt_to_le32desc(skb->tail) | cpu_to_le32(RxDescValid);
 		}
 		if (entry == RX_RING_SIZE - 1)
 			np->rx_ring[entry].rxaddr |= cpu_to_le32(RxDescEndRing);
@@ -1521,122 +1150,74 @@
 		writew(entry, dev->base_addr + RxDescQIdx);
 	}
 
-	if (debug > 5)
-		printk(KERN_DEBUG "  exiting netdev_rx() status of %d was %8.8x.\n",
-		       np->rx_done, desc_status);
+	if ((np->msg_level & NETIF_MSG_RX_STATUS)
+		|| memcmp(np->pad0, np->pad0 + 1, sizeof(np->pad0) -1))
+		printk(KERN_DEBUG "  exiting netdev_rx() status of %d was %8.8x %d.\n",
+			   np->rx_done, desc_status,
+			   memcmp(np->pad0, np->pad0 + 1, sizeof(np->pad0) -1));
 
-	/* Restart Rx engine if stopped. */
 	return 0;
 }
 
-
-static void netdev_media_change(struct net_device *dev)
-{
-	struct netdev_private *np = dev->priv;
-	long ioaddr = dev->base_addr;
-	u16 reg0, reg1, reg4, reg5;
-	u32 new_tx_mode;
-
-	/* reset status first */
-	mdio_read(dev, np->phys[0], MII_BMCR);
-	mdio_read(dev, np->phys[0], MII_BMSR);
-
-	reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
-	reg1 = mdio_read(dev, np->phys[0], MII_BMSR);
-
-	if (reg1 & BMSR_LSTATUS) {
-		/* link is up */
-		if (reg0 & BMCR_ANENABLE) {
-			/* autonegotiation is enabled */
-			reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
-			reg5 = mdio_read(dev, np->phys[0], MII_LPA);
-			if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
-				np->speed100 = 1;
-				np->full_duplex = 1;
-			} else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
-				np->speed100 = 1;
-				np->full_duplex = 0;
-			} else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
-				np->speed100 = 0;
-				np->full_duplex = 1;
-			} else {
-				np->speed100 = 0;
-				np->full_duplex = 0;
-			}
-		} else {
-			/* autonegotiation is disabled */
-			if (reg0 & BMCR_SPEED100)
-				np->speed100 = 1;
-			else
-				np->speed100 = 0;
-			if (reg0 & BMCR_FULLDPLX)
-				np->full_duplex = 1;
-			else
-				np->full_duplex = 0;
-		}
-		printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
-		       dev->name,
-		       np->speed100 ? "100" : "10",
-		       np->full_duplex ? "full" : "half");
-
-		new_tx_mode = np->tx_mode & ~0x2;	/* duplex setting */
-		if (np->full_duplex)
-			new_tx_mode |= 2;
-		if (np->tx_mode != new_tx_mode) {
-			np->tx_mode = new_tx_mode;
-			writel(np->tx_mode | 0x8000, ioaddr + TxMode);
-			udelay(1000);
-			writel(np->tx_mode, ioaddr + TxMode);
-		}
-	} else {
-		printk(KERN_DEBUG "%s: Link is down\n", dev->name);
-	}
-}
-
-
 static void netdev_error(struct net_device *dev, int intr_status)
 {
-	struct netdev_private *np = dev->priv;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
 
-	/* Came close to underrunning the Tx FIFO, increase threshold. */
-	if (intr_status & IntrTxDataLow) {
-		writel(++np->tx_threshold, dev->base_addr + TxThreshold);
-		printk(KERN_NOTICE "%s: Increasing Tx FIFO threshold to %d bytes\n",
-		       dev->name, np->tx_threshold * 16);
-	}
-	if (intr_status & IntrRxGFPDead) {
-		np->stats.rx_fifo_errors++;
-		np->stats.rx_errors++;
+	if (intr_status & LinkChange) {
+		int phy_num = np->phys[0];
+		if (np->msg_level & NETIF_MSG_LINK)
+			printk(KERN_NOTICE "%s: Link changed: Autonegotiation advertising"
+				   " %4.4x  partner %4.4x.\n", dev->name,
+				   mdio_read(dev, phy_num, 4),
+				   mdio_read(dev, phy_num, 5));
+		/* Clear sticky bit. */
+		mdio_read(dev, phy_num, 1);
+		/* If link beat has returned... */
+		if (mdio_read(dev, phy_num, 1) & 0x0004)
+			netif_link_up(dev);
+		else
+			netif_link_down(dev);
+		check_duplex(dev);
 	}
-	if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
-		np->stats.tx_fifo_errors++;
-		np->stats.tx_errors++;
+	if (intr_status & StatsMax) {
+		get_stats(dev);
 	}
-	if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug)
+	/* Came close to underrunning the Tx FIFO, increase threshold. */
+	if (intr_status & IntrTxDataLow)
+		writel(++np->tx_threshold, dev->base_addr + TxThreshold);
+	/* Ignore expected normal events, and handle abnormal events. */
+	if ((intr_status &
+		 ~(IntrAbnormalSummary|LinkChange|StatsMax|IntrTxDataLow| 0xFF01))
+		&& (np->msg_level & NETIF_MSG_DRV))
 		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
-		       dev->name, intr_status);
+			   dev->name, intr_status);
+	/* Hmmmmm, it's not clear how to recover from PCI faults. */
+	if (intr_status & IntrTxPCIErr)
+		np->stats.tx_fifo_errors++;
+	if (intr_status & IntrRxPCIErr)
+		np->stats.rx_fifo_errors++;
 }
 
-
 static struct net_device_stats *get_stats(struct net_device *dev)
 {
 	long ioaddr = dev->base_addr;
-	struct netdev_private *np = dev->priv;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
 
 	/* This adapter architecture needs no SMP locks. */
+#if LINUX_VERSION_CODE > 0x20119
 	np->stats.tx_bytes = readl(ioaddr + 0x57010);
 	np->stats.rx_bytes = readl(ioaddr + 0x57044);
+#endif
 	np->stats.tx_packets = readl(ioaddr + 0x57000);
 	np->stats.tx_aborted_errors =
 		readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
 	np->stats.tx_window_errors = readl(ioaddr + 0x57018);
-	np->stats.collisions =
-		readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);
+	np->stats.collisions = readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);
 
 	/* The chip only need report frame silently dropped. */
-	np->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
+	np->stats.rx_dropped	   += readw(ioaddr + RxDMAStatus);
 	writew(0, ioaddr + RxDMAStatus);
-	np->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
+	np->stats.rx_crc_errors	   = readl(ioaddr + 0x5703C);
 	np->stats.rx_frame_errors = readl(ioaddr + 0x57040);
 	np->stats.rx_length_errors = readl(ioaddr + 0x57058);
 	np->stats.rx_missed_errors = readl(ioaddr + 0x5707C);
@@ -1644,29 +1225,53 @@
 	return &np->stats;
 }
 
-
-/* Chips may use the upper or lower CRC bits, and may reverse and/or invert
+/* The little-endian AUTODIN II ethernet CRC calculations.
+   A big-endian version is also available.
+   This is slow but compact code.  Do not use this routine for bulk data,
+   use a table-based routine instead.
+   This is common code and should be moved to net/core/crc.c.
+   Chips may use the upper or lower CRC bits, and may reverse and/or invert
    them.  Select the endian-ness that results in minimal calculations.
 */
+static unsigned const ethernet_polynomial_le = 0xedb88320U;
+static inline unsigned ether_crc_le(int length, unsigned char *data)
+{
+	unsigned int crc = ~0;	/* Initial value. */
+	while(--length >= 0) {
+		unsigned char current_octet = *data++;
+		int bit;
+		for (bit = 8; --bit >= 0; current_octet >>= 1) {
+			if ((crc ^ current_octet) & 1) {
+				crc >>= 1;
+				crc ^= ethernet_polynomial_le;
+			} else
+				crc >>= 1;
+		}
+	}
+	return crc;
+}
 
 static void set_rx_mode(struct net_device *dev)
 {
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
 	long ioaddr = dev->base_addr;
 	u32 rx_mode;
 	struct dev_mc_list *mclist;
 	int i;
 
-	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
+	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
+		/* Unconditionally log net taps. */
+		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
 		rx_mode = AcceptBroadcast|AcceptAllMulticast|AcceptAll|AcceptMyPhys;
-	} else if ((dev->mc_count > multicast_filter_limit)
-		   || (dev->flags & IFF_ALLMULTI)) {
+	} else if ((dev->mc_count > np->multicast_filter_limit)
+			   ||  (dev->flags & IFF_ALLMULTI)) {
 		/* Too many to match, or accept all multicasts. */
 		rx_mode = AcceptBroadcast|AcceptAllMulticast|AcceptMyPhys;
 	} else if (dev->mc_count <= 15) {
-		/* Use the 16 element perfect filter, skip first entry. */
-		long filter_addr = ioaddr + PerfFilterTable + 1 * 16;
-		for (i = 1, mclist = dev->mc_list; mclist && i <= dev->mc_count;
-		     i++, mclist = mclist->next) {
+		/* Use the 16 element perfect filter. */
+		long filter_addr = ioaddr + 0x56000 + 1*16;
+		for (i = 1, mclist = dev->mc_list; mclist  &&  i <= dev->mc_count;
+			 i++, mclist = mclist->next) {
 			u16 *eaddrs = (u16 *)mclist->dmi_addr;
 			writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 4;
 			writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
@@ -1681,214 +1286,81 @@
 	} else {
 		/* Must use a multicast hash table. */
 		long filter_addr;
-		u16 mc_filter[32] __attribute__ ((aligned(sizeof(long))));	/* Multicast hash filter */
+		u16 mc_filter[32];			/* Multicast hash filter */
 
 		memset(mc_filter, 0, sizeof(mc_filter));
 		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
-		     i++, mclist = mclist->next) {
-			int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23;
-			__u32 *fptr = (__u32 *) &mc_filter[(bit_nr >> 4) & ~1];
-
-			*fptr |= cpu_to_le32(1 << (bit_nr & 31));
+			 i++, mclist = mclist->next) {
+			set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23, mc_filter);
 		}
-		/* Clear the perfect filter list, skip first entry. */
-		filter_addr = ioaddr + PerfFilterTable + 1 * 16;
+		/* Clear the perfect filter list. */
+		filter_addr = ioaddr + 0x56000 + 1*16;
 		for (i = 1; i < 16; i++) {
 			writew(0xffff, filter_addr); filter_addr += 4;
 			writew(0xffff, filter_addr); filter_addr += 4;
 			writew(0xffff, filter_addr); filter_addr += 8;
 		}
-		for (filter_addr = ioaddr + HashTable, i=0; i < 32; filter_addr+= 16, i++)
+		for (filter_addr=ioaddr + 0x56100, i=0; i < 32; filter_addr+= 16, i++){
+			np->mc_filter[i] = mc_filter[i];
 			writew(mc_filter[i], filter_addr);
+		}
 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
 	}
 	writel(rx_mode, ioaddr + RxFilterMode);
 }
 
-
-static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
-{
-	struct ethtool_cmd ecmd;
-	struct netdev_private *np = dev->priv;
-
-	if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
-		return -EFAULT;
-
-	switch (ecmd.cmd) {
-	case ETHTOOL_GSET:
-		ecmd.supported =
-			SUPPORTED_10baseT_Half |
-			SUPPORTED_10baseT_Full |
-			SUPPORTED_100baseT_Half |
-			SUPPORTED_100baseT_Full |
-			SUPPORTED_Autoneg |
-			SUPPORTED_MII;
-
-		ecmd.advertising = ADVERTISED_MII;
-		if (np->advertising & ADVERTISE_10HALF)
-			ecmd.advertising |= ADVERTISED_10baseT_Half;
-		if (np->advertising & ADVERTISE_10FULL)
-			ecmd.advertising |= ADVERTISED_10baseT_Full;
-		if (np->advertising & ADVERTISE_100HALF)
-			ecmd.advertising |= ADVERTISED_100baseT_Half;
-		if (np->advertising & ADVERTISE_100FULL)
-			ecmd.advertising |= ADVERTISED_100baseT_Full;
-		if (np->autoneg) {
-			ecmd.advertising |= ADVERTISED_Autoneg;
-			ecmd.autoneg = AUTONEG_ENABLE;
-		} else
-			ecmd.autoneg = AUTONEG_DISABLE;
-
-		ecmd.port = PORT_MII;
-		ecmd.transceiver = XCVR_INTERNAL;
-		ecmd.phy_address = np->phys[0];
-		ecmd.speed = np->speed100 ? SPEED_100 : SPEED_10;
-		ecmd.duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
-		ecmd.maxtxpkt = TX_RING_SIZE;
-		ecmd.maxrxpkt = np->intr_mitigation; /* not 100% accurate */
-
-
-		if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
-			return -EFAULT;
-		return 0;
-
-	case ETHTOOL_SSET: {
-		u16 autoneg, speed100, full_duplex;
-
-		autoneg = (ecmd.autoneg == AUTONEG_ENABLE);
-		speed100 = (ecmd.speed == SPEED_100);
-		full_duplex = (ecmd.duplex == DUPLEX_FULL);
-
-		np->autoneg = autoneg;
-		if (speed100 != np->speed100 ||
-		    full_duplex != np->full_duplex) {
-			np->speed100 = speed100;
-			np->full_duplex = full_duplex;
-			/* change advertising bits */
-			np->advertising &= ~(ADVERTISE_10HALF |
-					     ADVERTISE_10FULL |
-					     ADVERTISE_100HALF |
-					     ADVERTISE_100FULL |
-					     ADVERTISE_100BASE4);
-			if (speed100) {
-				if (full_duplex)
-					np->advertising |= ADVERTISE_100FULL;
-				else
-					np->advertising |= ADVERTISE_100HALF;
-			} else {
-				if (full_duplex)
-					np->advertising |= ADVERTISE_10FULL;
-				else
-					np->advertising |= ADVERTISE_10HALF;
-			}
-		}
-		check_duplex(dev);
-		return 0;
-	}
-
-	case ETHTOOL_GDRVINFO: {
-		struct ethtool_drvinfo info;
-		memset(&info, 0, sizeof(info));
-		info.cmd = ecmd.cmd;
-		strcpy(info.driver, DRV_NAME);
-		strcpy(info.version, DRV_VERSION);
-		*info.fw_version = 0;
-		strcpy(info.bus_info, PCI_SLOT_NAME(np->pci_dev));
-		if (copy_to_user(useraddr, &info, sizeof(info)))
-		       return -EFAULT;
-		return 0;
-	}
-
-	/* restart autonegotiation */
-	case ETHTOOL_NWAY_RST: {
-		int tmp;
-		int r = -EINVAL;
-		/* if autoneg is off, it's an error */
-		tmp = mdio_read(dev, np->phys[0], MII_BMCR);
-		if (tmp & BMCR_ANENABLE) {
-			tmp |= (BMCR_ANRESTART);
-			mdio_write(dev, np->phys[0], MII_BMCR, tmp);
-			r = 0;
-		}
-		return r;
-	}
-	/* get link status */
-	case ETHTOOL_GLINK: {
-		struct ethtool_value edata = {ETHTOOL_GLINK};
-		if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS)
-			edata.data = 1;
-		else
-			edata.data = 0;
-		if (copy_to_user(useraddr, &edata, sizeof(edata)))
-			return -EFAULT;
-		return 0;
-	}
-
-	/* get message-level */
-	case ETHTOOL_GMSGLVL: {
-		struct ethtool_value edata = {ETHTOOL_GMSGLVL};
-		edata.data = debug;
-		if (copy_to_user(useraddr, &edata, sizeof(edata)))
-			return -EFAULT;
-		return 0;
-	}
-	/* set message-level */
-	case ETHTOOL_SMSGLVL: {
-		struct ethtool_value edata;
-		if (copy_from_user(&edata, useraddr, sizeof(edata)))
-			return -EFAULT;
-		debug = edata.data;
-		return 0;
-	}
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-
-static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+/*
+  Handle user-level ioctl() calls.
+  We must use two numeric constants as the key because some clueless person
+  changed the value for the symbolic name.
+*/
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
-	struct netdev_private *np = dev->priv;
-	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&rq->ifr_data;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	u16 *data = (u16 *)&rq->ifr_data;
+	u32 *data32 = (void *)&rq->ifr_data;
 
 	switch(cmd) {
-	case SIOCETHTOOL:
-		return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
-
-	/* Legacy mii-diag interface */
-	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
-	case SIOCDEVPRIVATE:		/* for binary compat, remove in 2.5 */
-		data->phy_id = np->phys[0] & 0x1f;
+	case 0x8947: case 0x89F0:
+		/* SIOCGMIIPHY: Get the address of the PHY in use. */
+		data[0] = np->phys[0] & 0x1f;
 		/* Fall Through */
-
-	case SIOCGMIIREG:		/* Read MII PHY register. */
-	case SIOCDEVPRIVATE+1:		/* for binary compat, remove in 2.5 */
-		data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
+	case 0x8948: case 0x89F1:
+		/* SIOCGMIIREG: Read the specified MII register. */
+		data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
 		return 0;
-
-	case SIOCSMIIREG:		/* Write MII PHY register. */
-	case SIOCDEVPRIVATE+2:		/* for binary compat, remove in 2.5 */
+	case 0x8949: case 0x89F2:
+		/* SIOCSMIIREG: Write the specified MII register */
 		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
-		if (data->phy_id == np->phys[0]) {
-			u16 value = data->val_in;
-			switch (data->reg_num) {
+		if (data[0] == np->phys[0]) {
+			u16 value = data[2];
+			switch (data[1]) {
 			case 0:
-				if (value & (BMCR_RESET | BMCR_ANENABLE))
-					/* Autonegotiation. */
-					np->autoneg = 1;
-				else {
-					np->full_duplex = (value & BMCR_FULLDPLX) ? 1 : 0;
-					np->autoneg = 0;
-				}
-				break;
-			case 4:
-				np->advertising = value;
+				/* Check for autonegotiation on or reset. */
+				np->medialock = (value & 0x9000) ? 0 : 1;
+				if (np->medialock)
+					np->full_duplex = (value & 0x0100) ? 1 : 0;
 				break;
+			case 4: np->advertising = value; break;
 			}
 			check_duplex(dev);
 		}
-		mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
+		mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+		return 0;
+	case SIOCGPARAMS:
+		data32[0] = np->msg_level;
+		data32[1] = np->multicast_filter_limit;
+		data32[2] = np->max_interrupt_work;
+		data32[3] = np->rx_copybreak;
+		return 0;
+	case SIOCSPARAMS:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		np->msg_level = data32[0];
+		np->multicast_filter_limit = data32[1];
+		np->max_interrupt_work = data32[2];
+		np->rx_copybreak = data32[3];
 		return 0;
 	default:
 		return -EOPNOTSUPP;
@@ -1898,13 +1370,12 @@
 static int netdev_close(struct net_device *dev)
 {
 	long ioaddr = dev->base_addr;
-	struct netdev_private *np = dev->priv;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
 	int i;
 
-	netif_stop_queue(dev);
-	netif_stop_if(dev);
+	netif_stop_tx_queue(dev);
 
-	if (debug > 1) {
+	if (np->msg_level & NETIF_MSG_IFDOWN) {
 		printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %4.4x.\n",
 			   dev->name, (int)readl(ioaddr + IntrStatus));
 		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
@@ -1915,22 +1386,24 @@
 	writel(0, ioaddr + IntrEnable);
 
 	/* Stop the chip's Tx and Rx processes. */
+	writel(0, ioaddr + GenCtrl);
+
+	del_timer(&np->timer);
 
 #ifdef __i386__
-	if (debug > 2) {
+	if (np->msg_level & NETIF_MSG_IFDOWN) {
 		printk("\n"KERN_DEBUG"  Tx ring at %8.8x:\n",
-			   np->tx_ring_dma);
+			   (int)virt_to_bus(np->tx_ring));
 		for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
 			printk(KERN_DEBUG " #%d desc. %8.8x %8.8x -> %8.8x.\n",
-			       i, le32_to_cpu(np->tx_ring[i].status),
-			       le32_to_cpu(np->tx_ring[i].first_addr),
-			       le32_to_cpu(np->tx_done_q[i].status));
+				   i, np->tx_ring[i].status, np->tx_ring[i].addr,
+				   np->tx_done_q[i].status);
 		printk(KERN_DEBUG "  Rx ring at %8.8x -> %p:\n",
-		       np->rx_ring_dma, np->rx_done_q);
+			   (int)virt_to_bus(np->rx_ring), np->rx_done_q);
 		if (np->rx_done_q)
 			for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
 				printk(KERN_DEBUG " #%d desc. %8.8x -> %8.8x\n",
-				       i, le32_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
+					   i, np->rx_ring[i].rxaddr, np->rx_done_q[i].status);
 		}
 	}
 #endif /* __i386__ debugging only */
@@ -1939,111 +1412,124 @@
 
 	/* Free all the skbuffs in the Rx queue. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		np->rx_ring[i].rxaddr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
-		if (np->rx_info[i].skb != NULL) {
-			pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
-			dev_kfree_skb(np->rx_info[i].skb);
+		np->rx_ring[i].rxaddr = 0xBADF00D0; /* An invalid address. */
+		if (np->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+			np->rx_skbuff[i]->free = 1;
+#endif
+			dev_free_skb(np->rx_skbuff[i]);
 		}
-		np->rx_info[i].skb = NULL;
-		np->rx_info[i].mapping = 0;
+		np->rx_skbuff[i] = 0;
 	}
 	for (i = 0; i < TX_RING_SIZE; i++) {
-		struct sk_buff *skb = np->tx_info[i].skb;
-#ifdef ZEROCOPY
-		int j;
-#endif /* ZEROCOPY */
-		if (skb == NULL)
-			continue;
-		pci_unmap_single(np->pci_dev,
-				 np->tx_info[i].first_mapping,
-				 skb_first_frag_len(skb), PCI_DMA_TODEVICE);
-		np->tx_info[i].first_mapping = 0;
-		dev_kfree_skb(skb);
-		np->tx_info[i].skb = NULL;
-#ifdef ZEROCOPY
-		for (j = 0; j < MAX_STARFIRE_FRAGS; j++)
-			if (np->tx_info[i].frag_mapping[j]) {
-				pci_unmap_single(np->pci_dev,
-						 np->tx_info[i].frag_mapping[j],
-						 skb_shinfo(skb)->frags[j].size,
-						 PCI_DMA_TODEVICE);
-				np->tx_info[i].frag_mapping[j] = 0;
-			} else
-				break;
-#endif /* ZEROCOPY */
+		if (np->tx_skbuff[i])
+			dev_free_skb(np->tx_skbuff[i]);
+		np->tx_skbuff[i] = 0;
 	}
 
-	COMPAT_MOD_DEC_USE_COUNT;
+	MOD_DEC_USE_COUNT;
 
 	return 0;
 }
 
 
-static void __devexit starfire_remove_one (struct pci_dev *pdev)
+static int starfire_pwr_event(void *dev_instance, int event)
 {
-	struct net_device *dev = pci_get_drvdata(pdev);
-	struct netdev_private *np;
+	struct net_device *dev = dev_instance;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
 
-	if (!dev)
-		BUG();
+	if (np->msg_level & NETIF_MSG_LINK)
+		printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
+	switch(event) {
+	case DRV_ATTACH:
+		MOD_INC_USE_COUNT;
+		break;
+	case DRV_SUSPEND:
+		/* Disable interrupts, stop Tx and Rx. */
+		writel(0x0000, ioaddr + IntrEnable);
+		writel(0, ioaddr + GenCtrl);
+		break;
+	case DRV_RESUME:
+		/* This is incomplete: we must factor start_chip() out of open(). */
+		writel(np->tx_threshold, ioaddr + TxThreshold);
+		writel(interrupt_mitigation, ioaddr + IntrTimerCtrl);
+		set_rx_mode(dev);
+		writel(np->intr_enable, ioaddr + IntrEnable);
+		writel(TxEnable|RxEnable, ioaddr + GenCtrl);
+		break;
+	case DRV_DETACH: {
+		struct net_device **devp, **next;
+		if (dev->flags & IFF_UP) {
+			/* Some, but not all, kernel versions close automatically. */
+			dev_close(dev);
+			dev->flags &= ~(IFF_UP|IFF_RUNNING);
+		}
+		unregister_netdev(dev);
+		release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
+#ifndef USE_IO_OPS
+		iounmap((char *)dev->base_addr);
+#endif
+		for (devp = &root_net_dev; *devp; devp = next) {
+			next = &((struct netdev_private *)(*devp)->priv)->next_module;
+			if (*devp == dev) {
+				*devp = *next;
+				break;
+			}
+		}
+		if (np->priv_addr)
+			kfree(np->priv_addr);
+		kfree(dev);
+		MOD_DEC_USE_COUNT;
+		break;
+	}
+	}
 
-	np = dev->priv;
-	if (np->tx_done_q)
-		pci_free_consistent(pdev, PAGE_SIZE,
-				    np->tx_done_q, np->tx_done_q_dma);
-	if (np->rx_done_q)
-		pci_free_consistent(pdev,
-				    sizeof(struct rx_done_desc) * DONE_Q_SIZE,
-				    np->rx_done_q, np->rx_done_q_dma);
-	if (np->tx_ring)
-		pci_free_consistent(pdev, PAGE_SIZE,
-				    np->tx_ring, np->tx_ring_dma);
-	if (np->rx_ring)
-		pci_free_consistent(pdev, PAGE_SIZE,
-				    np->rx_ring, np->rx_ring_dma);
-
-	unregister_netdev(dev);
-	iounmap((char *)dev->base_addr);
-	pci_release_regions(pdev);
-
-	pci_set_drvdata(pdev, NULL);
-	kfree(dev);			/* Will also free np!! */
+	return 0;
 }
 
-
-static struct pci_driver starfire_driver = {
-	name:		DRV_NAME,
-	probe:		starfire_init_one,
-	remove:		__devexit_p(starfire_remove_one),
-	id_table:	starfire_pci_tbl,
-};
-
-
-static int __init starfire_init (void)
-{
-/* when a module, this is printed whether or not devices are found in probe */
+
 #ifdef MODULE
-	printk(version);
-#endif
-	return pci_module_init (&starfire_driver);
-}
-
-
-static void __exit starfire_cleanup (void)
+int init_module(void)
 {
-	pci_unregister_driver (&starfire_driver);
+	if (debug >= NETIF_MSG_DRV)	/* Emit version even if no cards detected. */
+		printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+	if (pci_drv_register(&starfire_drv_id, NULL)) {
+		printk(KERN_INFO " No Starfire adapters detected, driver not loaded.\n");
+		return -ENODEV;
+	}
+	return 0;
 }
 
+void cleanup_module(void)
+{
+	struct net_device *next_dev;
 
-module_init(starfire_init);
-module_exit(starfire_cleanup);
+	pci_drv_unregister(&starfire_drv_id);
 
+	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+	while (root_net_dev) {
+		struct netdev_private *np = (void *)(root_net_dev->priv);
+		unregister_netdev(root_net_dev);
+		iounmap((char *)(root_net_dev->base_addr));
+		next_dev = np->next_module;
+		if (np->tx_done_q) free_page((long)np->tx_done_q);
+		if (np->rx_done_q) free_page((long)np->rx_done_q);
+		if (np->priv_addr) kfree(np->priv_addr);
+		kfree(root_net_dev);
+		root_net_dev = next_dev;
+	}
+}
 
+#endif  /* MODULE */
+
 /*
  * Local variables:
- *  compile-command: "gcc -DMODULE -Wall -Wstrict-prototypes -O2 -c starfire.c"
- *  simple-compile-command: "gcc -DMODULE -O2 -c starfire.c"
- *  c-basic-offset: 8
- *  tab-width: 8
+ *  compile-command: "make KERNVER=`uname -r` starfire.o"
+ *  compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c starfire.c"
+ *  simple-compile-command: "gcc -DMODULE -O6 -c starfire.c"
+ *  c-indent-level: 4
+ *  c-basic-offset: 4
+ *  tab-width: 4
  * End:
  */
diff -uNr net/drivers/net/tulip.c linux-2.4.20/drivers/net/tulip.c
--- net/drivers/net/tulip.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.4.20/drivers/net/tulip.c	2003-01-14 20:29:36.000000000 -0500
@@ -0,0 +1,3657 @@
+/* tulip.c: A DEC 21040 family ethernet driver for Linux. */
+/*
+	Written/copyright 1994-2002 by Donald Becker.
+
+	This software may be used and distributed according to the terms of
+	the GNU General Public License (GPL), incorporated herein by reference.
+	Drivers based on or derived from this code fall under the GPL and must
+	retain the authorship, copyright and license notice.  This file is not
+	a complete program and may only be used when the entire operating
+	system is licensed under the GPL.
+
+	This driver is for the Digital "Tulip" Ethernet adapter interface.
+	It should work with most DEC 21*4*-based chips/ethercards, as well as
+	with work-alike chips from Lite-On (PNIC) and Macronix (MXIC) and ASIX.
+
+	The author may be reached as becker@scyld.com, or C/O
+	Scyld Computing Corporation
+	410 Severn Ave., Suite 210
+	Annapolis MD 21403
+
+	Support and updates available at
+	http://www.scyld.com/network/tulip.html
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"tulip.c:v0.95f 11/17/2002  Written by Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+"  http://www.scyld.com/network/tulip.html\n";
+
+#define SMP_CHECK
+
+/* The user-configurable values.
+   These may be modified when a driver module is loaded.*/
+
+static int debug = 2;			/* Message enable: 0..31 = no..all messages. */
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 25;
+
+#define MAX_UNITS 8
+/* Used to pass the full-duplex flag, etc. */
+static int full_duplex[MAX_UNITS] = {0, };
+static int options[MAX_UNITS] = {0, };
+static int mtu[MAX_UNITS] = {0, };			/* Jumbo MTU for interfaces. */
+
+/*  The possible media types that can be set in options[] are: */
+#define MEDIA_MASK 31
+static const char * const medianame[32] = {
+	"10baseT", "10base2", "AUI", "100baseTx",
+	"10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
+	"100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
+	"10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
+	"MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
+	"","","","", "","","","",  "","","","Transceiver reset",
+};
+
+/* Set if the PCI BIOS detects the chips on a multiport board backwards. */
+#ifdef REVERSE_PROBE_ORDER
+static int reverse_probe = 1;
+#else
+static int reverse_probe = 0;
+#endif
+
+/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
+#ifdef __alpha__				/* Always copy to aligned IP headers. */
+static int rx_copybreak = 1518;
+#else
+static int rx_copybreak = 100;
+#endif
+
+/*
+  Set the bus performance register.
+	Typical: Set 16 longword cache alignment, no burst limit.
+	Cache alignment bits 15:14	     Burst length 13:8
+		0000	No alignment  0x00000000 unlimited		0800 8 longwords
+		4000	8  longwords		0100 1 longword		1000 16 longwords
+		8000	16 longwords		0200 2 longwords	2000 32 longwords
+		C000	32  longwords		0400 4 longwords
+	Warning: many older 486 systems are broken and require setting 0x00A04800
+	   8 longword cache alignment, 8 longword burst.
+	ToDo: Non-Intel setting could be better.
+*/
+
+#if defined(__alpha__) || defined(__x86_64)
+static int csr0 = 0x01A00000 | 0xE000;
+#elif defined(__i386__) || defined(__powerpc__) || defined(__sparc__)
+/* Do *not* rely on hardware endian correction for big-endian machines! */
+static int csr0 = 0x01A00000 | 0x8000;
+#else
+#warning Processor architecture undefined!
+static int csr0 = 0x00A00000 | 0x4800;
+#endif
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+   Typical is a 64 element hash table based on the Ethernet CRC.
+   This value does not apply to the 512 bit table chips.
+*/
+static int multicast_filter_limit = 32;
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the descriptor ring sizes a power of two for efficiency.
+   The Tx queue length limits transmit packets to a portion of the available
+   ring entries.  It should be at least one element less to allow multicast
+   filter setup frames to be queued.  It must be at least four for hysteresis.
+   Making the Tx queue too long decreases the effectiveness of channel
+   bonding and packet priority.
+   Large receive rings merely consume memory. */
+#define TX_RING_SIZE	16
+#define TX_QUEUE_LEN	10
+#define RX_RING_SIZE	32
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT  (4*HZ)
+#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/
+/* This is a mysterious value that can be written to CSR11 in the 21040 (only)
+   to support a pre-NWay full-duplex signaling mechanism using short frames.
+   No one knows what it should be, but if left at its default value some
+   10base2(!) packets trigger a full-duplex-request interrupt. */
+#define FULL_DUPLEX_MAGIC	0x6969
+
+/* The include file section.  We start by doing checks and fix-ups for
+   missing compile flags. */
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning  You must compile this file with the correct options!
+#warning  See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+#include <linux/config.h>
+#if defined(CONFIG_SMP)  &&  ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(CONFIG_MODVERSIONS) && defined(MODULE) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/processor.h>		/* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/unaligned.h>
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+/* Condensed operations for readability. */
+#define virt_to_le32desc(addr)  cpu_to_le32(virt_to_bus(addr))
+
+#if (LINUX_VERSION_CODE >= 0x20100)  &&  defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(debug, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(reverse_probe, "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(csr0, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(multicast_filter_limit, "i");
+#ifdef MODULE_PARM_DESC
+MODULE_PARM_DESC(debug, "Tulip driver message level (0-31)");
+MODULE_PARM_DESC(options,
+				 "Tulip: force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(max_interrupt_work,
+				 "Tulip driver maximum events handled per interrupt");
+MODULE_PARM_DESC(full_duplex, "Tulip: non-zero to set forced full duplex.");
+MODULE_PARM_DESC(rx_copybreak,
+				 "Tulip breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+				 "Tulip breakpoint for switching to Rx-all-multicast");
+MODULE_PARM_DESC(reverse_probe, "Search PCI devices in reverse order to work "
+				 "around misordered multiport NICS.");
+MODULE_PARM_DESC(csr0, "Special setting for the CSR0 PCI bus parameter "
+				 "register.");
+#endif
+
+/* This driver was originally written to use I/O space access, but now
+   uses memory space by default. Override this with -DUSE_IO_OPS. */
+#if (LINUX_VERSION_CODE < 0x20100)  ||  ! defined(MODULE)
+#define USE_IO_OPS
+#endif
+#ifndef USE_IO_OPS
+#undef inb
+#undef inw
+#undef inl
+#undef outb
+#undef outw
+#undef outl
+#define inb readb
+#define inw readw
+#define inl readl
+#define outb writeb
+#define outw writew
+#define outl writel
+#endif
+
+/*
+				Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the DECchip "Tulip", Digital's
+single-chip ethernet controllers for PCI.  Supported members of the family
+are the 21040, 21041, 21140, 21140A, 21142, and 21143.  Similar work-alike
+chips from Lite-On, Macronix, ASIX, Compex and others listed below are also
+supported.
+
+These chips are used on at least 140 unique PCI board designs.  The great
+number of chips and board designs supported is the reason for the
+driver size and complexity.  Almost all of the increased complexity is in the
+board configuration and media selection code.  There is very little
+increase in the operational critical path length.
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board.  The system BIOS preferably should assign the
+PCI INTA signal to an otherwise unused system IRQ line.
+
+Some boards have EEPROM tables with a default media entry.  The factory default
+is usually "autoselect".  This should only be overridden when using
+transceiver connections without link beat e.g. 10base2 or AUI, or (rarely!)
+for forcing full-duplex when used with old link partners that do not do
+autonegotiation.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+The Tulip can use either ring buffers or lists of Tx and Rx descriptors.
+This driver uses statically allocated rings of Rx and Tx descriptors, set at
+compile time by RX/TX_RING_SIZE.  This version of the driver allocates skbuffs
+for the Rx ring buffers at open() time and passes the skb->data field to the
+Tulip as receive data buffers.  When an incoming frame is less than
+RX_COPYBREAK bytes long, a fresh skbuff is allocated and the frame is
+copied to the new skbuff.  When the incoming frame is larger, the skbuff is
+passed directly up the protocol stack and replaced by a newly allocated
+skbuff.
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames.  For small frames the copying cost is negligible (esp. considering
+that we are pre-loading the cache with immediately useful header
+information).  For large frames the copying cost is non-trivial, and the
+larger copy might flush the cache of useful data.  A subtle aspect of this
+choice is that the Tulip only receives into longword aligned buffers, thus
+the IP header at offset 14 is not longword aligned for further processing.
+Copied frames are put into the new skbuff at an offset of "+2", thus copying
+has the beneficial effect of aligning the IP header and preloading the
+cache.
+
+IIIC. Synchronization
+The driver runs as two independent, single-threaded flows of control.  One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag.  The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag.  It sets the tbusy flag whenever it is queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished otherwise it sets
+the 'tp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring.  (The Tx-done interrupt can not be selectively turned off, so
+we cannot avoid the interrupt overhead by having the Tx routine reap the Tx
+stats.)	 After reaping the stats, it marks the queue entry as empty by setting
+the 'base' to zero.	 Iff the 'tp->tx_full' flag is set, it clears both the
+tx_full and tbusy flags.
+
+IV. Notes
+
+Thanks to Duke Kamstra of SMC for long ago providing an EtherPower board.
+Greg LaPolla at Linksys provided PNIC and other Linksys boards.
+Znyx provided a four-port card for testing.
+
+IVb. References
+
+http://scyld.com/expert/NWay.html
+http://www.digital.com  (search for current 21*4* datasheets and "21X4 SROM")
+http://www.national.com/pf/DP/DP83840A.html
+http://www.asix.com.tw/pmac.htm
+http://www.admtek.com.tw/
+
+IVc. Errata
+
+The old DEC databooks were light on details.
+The 21040 databook claims that CSR13, CSR14, and CSR15 should each be the last
+register of the set CSR12-15 written.  Hmmm, now how is that possible?
+
+The DEC SROM format is badly designed and not precisely defined, leading to
+part of the media selection junkheap below.  Some boards do not have EEPROM
+media tables and need to be patched up.  Worse, other boards use the DEC
+design kit media table when it is not correct for their design.
+
+We cannot use MII interrupts because there is no defined GPIO pin to attach
+them.  The MII transceiver status is polled using a kernel timer.
+
+*/
+
+static void *tulip_probe1(struct pci_dev *pdev, void *init_dev,
+						  long ioaddr, int irq, int chip_idx, int find_cnt);
+static int tulip_pwr_event(void *dev_instance, int event);
+
+#ifdef USE_IO_OPS
+#define TULIP_IOTYPE  PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0
+#define TULIP_SIZE 0x80
+#define TULIP_SIZE1 0x100
+#else
+#define TULIP_IOTYPE  PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1
+#define TULIP_SIZE   0x400		/* New PCI v2.1 recommends 4K min mem size. */
+#define TULIP_SIZE1	0x400		/* New PCI v2.1 recommends 4K min mem size. */
+#endif
+
+/* This must match tulip_tbl[]!  Note 21142 == 21143. */
+enum tulip_chips {
+	DC21040=0, DC21041=1, DC21140=2, DC21142=3, DC21143=3,
+	LC82C168, MX98713, MX98715, MX98725, AX88141, AX88140, PNIC2, COMET,
+	COMPEX9881, I21145, XIRCOM, CONEXANT,
+	/* These flags may be added to the chip type. */
+	HAS_VLAN=0x100,
+};
+
+/* PCI match table.  Each entry: name, { device+vendor ID, ID mask,
+   subsystem ID, subsystem mask, revision, revision mask }, I/O type flags,
+   register window size, and driver flags (low byte indexes enum
+   tulip_chips / tulip_tbl[]).  Entries that match on subsystem or revision
+   must precede the generic entry for the same device ID. */
+static struct pci_id_info pci_id_tbl[] = {
+	{ "Digital DC21040 Tulip", { 0x00021011, 0xffffffff },
+	  TULIP_IOTYPE, 0x80, DC21040 },
+	{ "Digital DC21041 Tulip", { 0x00141011, 0xffffffff },
+	  TULIP_IOTYPE, 0x80, DC21041 },
+	{ "Digital DS21140A Tulip", { 0x00091011, 0xffffffff, 0,0, 0x20,0xf0 },
+	  TULIP_IOTYPE, 0x80, DC21140 },
+	{ "Digital DS21140 Tulip", { 0x00091011, 0xffffffff },
+	  TULIP_IOTYPE, 0x80, DC21140 },
+	{ "Digital DS21143-xD Tulip", { 0x00191011, 0xffffffff, 0,0, 0x40,0xf0 },
+	  TULIP_IOTYPE, TULIP_SIZE, DC21142 | HAS_VLAN },
+	{ "Digital DS21143-xC Tulip", { 0x00191011, 0xffffffff, 0,0, 0x30,0xf0 },
+	  TULIP_IOTYPE, TULIP_SIZE, DC21142 },
+	{ "Digital DS21142 Tulip", { 0x00191011, 0xffffffff },
+	  TULIP_IOTYPE, TULIP_SIZE, DC21142 },
+	{ "Kingston KNE110tx (PNIC)",
+	  { 0x000211AD, 0xffffffff, 0xf0022646, 0xffffffff },
+	  TULIP_IOTYPE, 256, LC82C168 },
+	{ "Linksys LNE100TX (82c168 PNIC)",				/*  w/SYM */
+	  { 0x000211AD, 0xffffffff, 0xffff11ad, 0xffffffff, 17,0xff },
+	  TULIP_IOTYPE, 256, LC82C168 },
+	{ "Linksys LNE100TX (82c169 PNIC)",				/* w/ MII */
+	  { 0x000211AD, 0xffffffff, 0xf00311ad, 0xffffffff, 32,0xff },
+	  TULIP_IOTYPE, 256, LC82C168 },
+	{ "Lite-On 82c168 PNIC", { 0x000211AD, 0xffffffff },
+	  TULIP_IOTYPE, 256, LC82C168 },
+	{ "Macronix 98713 PMAC", { 0x051210d9, 0xffffffff },
+	  TULIP_IOTYPE, 256, MX98713 },
+	{ "Macronix 98715 PMAC", { 0x053110d9, 0xffffffff },
+	  TULIP_IOTYPE, 256, MX98715 },
+	{ "Macronix 98725 PMAC", { 0x053110d9, 0xffffffff },
+	  TULIP_IOTYPE, 256, MX98725 },
+	{ "ASIX AX88141", { 0x1400125B, 0xffffffff, 0,0, 0x10, 0xf0 },
+	  TULIP_IOTYPE, 128, AX88141 },
+	{ "ASIX AX88140", { 0x1400125B, 0xffffffff },
+	  TULIP_IOTYPE, 128, AX88140 },
+	{ "Lite-On LC82C115 PNIC-II", { 0xc11511AD, 0xffffffff },
+	  TULIP_IOTYPE, 256, PNIC2 },
+	{ "ADMtek AN981 Comet", { 0x09811317, 0xffffffff },
+	  TULIP_IOTYPE, TULIP_SIZE1, COMET },
+	{ "ADMtek Centaur-P", { 0x09851317, 0xffffffff },
+	  TULIP_IOTYPE, TULIP_SIZE1, COMET },
+	{ "ADMtek Centaur-C", { 0x19851317, 0xffffffff },
+	  TULIP_IOTYPE, TULIP_SIZE1, COMET },
+	{ "D-Link DFE-680TXD v1.0 (ADMtek Centaur-C)", { 0x15411186, 0xffffffff },
+	  TULIP_IOTYPE, TULIP_SIZE1, COMET },
+	{ "ADMtek Centaur-C (Linksys v2)", { 0xab0213d1, 0xffffffff },
+	  TULIP_IOTYPE, TULIP_SIZE1, COMET },
+	{ "ADMtek Centaur-C (Linksys)", { 0xab0313d1, 0xffffffff },
+	  TULIP_IOTYPE, TULIP_SIZE1, COMET },
+	{ "ADMtek Centaur-C (Linksys)", { 0xab0813d1, 0xffffffff },
+	  TULIP_IOTYPE, TULIP_SIZE1, COMET },
+	{ "ADMtek Centaur-C (Linksys PCM200 v3)", { 0xab091737, 0xffffffff },
+	  TULIP_IOTYPE, TULIP_SIZE1, COMET },
+	{ "STMicro STE10/100 Comet", { 0x0981104a, 0xffffffff },
+	  TULIP_IOTYPE, TULIP_SIZE1, COMET },
+	{ "STMicro STE10/100A Comet", { 0x2774104a, 0xffffffff },
+	  TULIP_IOTYPE, TULIP_SIZE1, COMET },
+	{ "ADMtek Comet-II", { 0x95111317, 0xffffffff },
+	  TULIP_IOTYPE, TULIP_SIZE1, COMET },
+	{ "ADMtek Comet-II (9513)", { 0x95131317, 0xffffffff },
+	  TULIP_IOTYPE, TULIP_SIZE1, COMET },
+	{ "SMC1255TX (ADMtek Comet)",
+	  { 0x12161113, 0xffffffff, 0x125510b8, 0xffffffff },
+	  TULIP_IOTYPE, TULIP_SIZE1, COMET },
+	{ "Accton EN1217/EN2242 (ADMtek Comet)", { 0x12161113, 0xffffffff },
+	  TULIP_IOTYPE, TULIP_SIZE1, COMET },
+	{ "SMC1255TX (ADMtek Comet-II)", { 0x125510b8, 0xffffffff },
+	  TULIP_IOTYPE, TULIP_SIZE1, COMET },
+	{ "ADMtek Comet-II (model 1020)", { 0x1020111a, 0xffffffff },
+	  TULIP_IOTYPE, TULIP_SIZE1, COMET },
+	{ "Allied Telesyn A120 (ADMtek Comet)", { 0xa1201259, 0xffffffff },
+	  TULIP_IOTYPE, TULIP_SIZE1, COMET },
+	{ "Compex RL100-TX", { 0x988111F6, 0xffffffff },
+	  TULIP_IOTYPE, 128, COMPEX9881 },
+	{ "Intel 21145 Tulip", { 0x00398086, 0xffffffff },
+	  TULIP_IOTYPE, 128, I21145 },
+	{ "Xircom Tulip clone", { 0x0003115d, 0xffffffff },
+	  TULIP_IOTYPE, 128, XIRCOM },
+	{ "Davicom DM9102", { 0x91021282, 0xffffffff },
+	  TULIP_IOTYPE, 0x80, DC21140 },
+	{ "Davicom DM9100", { 0x91001282, 0xffffffff },
+	  TULIP_IOTYPE, 0x80, DC21140 },
+	{ "Macronix mxic-98715 (EN1217)", { 0x12171113, 0xffffffff },
+	  TULIP_IOTYPE, 256, MX98715 },
+	{ "Conexant LANfinity", { 0x180314f1, 0xffffffff },
+	  TULIP_IOTYPE, TULIP_SIZE1, CONEXANT },
+	{ "3Com 3cSOHO100B-TX (ADMtek Centaur)", { 0x930010b7, 0xffffffff },
+	  TULIP_IOTYPE, TULIP_SIZE1, COMET },
+	{ 0},
+};
+
+/* Registration record handed to the CardBus/PCI activation shim:
+   driver name, hotswap capability, PCI class, the match table above,
+   and the probe/power-event entry points. */
+struct drv_id_info tulip_drv_id = {
+	"tulip", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
+	tulip_probe1, tulip_pwr_event };
+
+/* This table is used during operation for capabilities and media timer. */
+
+/* Per-chip-family media-monitoring timer routines, selected via
+   tulip_tbl[].media_timer below. */
+static void tulip_timer(unsigned long data);
+static void nway_timer(unsigned long data);
+static void mxic_timer(unsigned long data);
+static void pnic_timer(unsigned long data);
+static void comet_timer(unsigned long data);
+
+/* Capability bits stored in tulip_tbl[].flags (and copied into tp->flags,
+   OR'd with the high bits of pci_id_tbl[].drv_flags). */
+enum tbl_flag {
+	HAS_MII=1, HAS_MEDIA_TABLE=2, CSR12_IN_SROM=4, ALWAYS_CHECK_MII=8,
+	HAS_PWRDWN=0x10, MC_HASH_ONLY=0x20, /* Hash-only multicast filter. */
+	HAS_PNICNWAY=0x80, HAS_NWAY=0x40,	/* Uses internal NWay xcvr. */
+	HAS_INTR_MITIGATION=0x100, IS_ASIX=0x200, HAS_8023X=0x400,
+	COMET_MAC_ADDR=0x0800,
+};
+
+/* Note: this table must match  enum tulip_chips  above. */
+static struct tulip_chip_table {
+	char *chip_name;
+	int io_size;				/* Unused */
+	int valid_intrs;			/* CSR7 interrupt enable settings */
+	int flags;					/* Bits from enum tbl_flag. */
+	void (*media_timer)(unsigned long data);	/* Media-monitor routine. */
+} tulip_tbl[] = {
+  { "Digital DC21040 Tulip", 128, 0x0001ebef, 0, tulip_timer },
+  { "Digital DC21041 Tulip", 128, 0x0001ebff,
+	HAS_MEDIA_TABLE | HAS_NWAY, tulip_timer },
+  { "Digital DS21140 Tulip", 128, 0x0001ebef,
+	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, tulip_timer },
+  { "Digital DS21143 Tulip", 128, 0x0801fbff,
+	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_PWRDWN | HAS_NWAY
+	| HAS_INTR_MITIGATION, nway_timer },
+  { "Lite-On 82c168 PNIC", 256, 0x0001ebef,
+	HAS_MII | HAS_PNICNWAY, pnic_timer },
+  { "Macronix 98713 PMAC", 128, 0x0001ebef,
+	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer },
+  { "Macronix 98715 PMAC", 256, 0x0001ebef,
+	HAS_MEDIA_TABLE, mxic_timer },
+  { "Macronix 98725 PMAC", 256, 0x0001ebef,
+	HAS_MEDIA_TABLE, mxic_timer },
+  { "ASIX AX88140", 128, 0x0001fbff,
+	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY | IS_ASIX, tulip_timer },
+  { "ASIX AX88141", 128, 0x0001fbff,
+	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY | IS_ASIX, tulip_timer },
+  { "Lite-On PNIC-II", 256, 0x0801fbff,
+	HAS_MII | HAS_NWAY | HAS_8023X, nway_timer },
+  { "ADMtek Comet", 256, 0x0001abef,
+	HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer },
+  { "Compex 9881 PMAC", 128, 0x0001ebef,
+	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer },
+  { "Intel DS21145 Tulip", 128, 0x0801fbff,
+	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_PWRDWN | HAS_NWAY,
+	nway_timer },
+  { "Xircom tulip work-alike", 128, 0x0801fbff,
+	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_PWRDWN | HAS_NWAY,
+	nway_timer },
+  { "Conexant LANfinity", 256, 0x0001ebef,
+	HAS_MII | HAS_PWRDWN, tulip_timer },
+  {0},
+};
+
+/* A full-duplex map for media types. */
+enum MediaIs {
+	MediaIsFD = 1, MediaAlwaysFD=2, MediaIsMII=4, MediaIsFx=8,
+	MediaIs100=16};
+/* Indexed by SROM media code; each value is a bitmask of MediaIs bits. */
+static const char media_cap[32] =
+{0,0,0,16,  3,19,16,24,  27,4,7,5, 0,20,23,20,  28,31,0,0, };
+/* 21040 CSR13 transceiver setup values, indexed by media code. */
+static u8 t21040_csr13[] = {2,0x0C,8,4,  4,0,0,0, 0,0,0,0, 4,0,0,0};
+
+/* 21041 transceiver register settings: 10-T, 10-2, AUI, 10-T, 10T-FD*/
+static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
+static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
+static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
+
+/* 21142/21143 SIA register settings, same media ordering as above. */
+static u16 t21142_csr13[] = { 0x0001, 0x0009, 0x0009, 0x0000, 0x0001, };
+static u16 t21142_csr14[] = { 0xFFFF, 0x0705, 0x0705, 0x0000, 0x7F3D, };
+static u16 t21142_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
+
+/* Offsets to the Command and Status Registers, "CSRs".  All accesses
+   must be longword instructions and quadword aligned.
+   (Registers are spaced 8 bytes apart in the bus address space.) */
+enum tulip_offsets {
+	CSR0=0,    CSR1=0x08, CSR2=0x10, CSR3=0x18, CSR4=0x20, CSR5=0x28,
+	CSR6=0x30, CSR7=0x38, CSR8=0x40, CSR9=0x48, CSR10=0x50, CSR11=0x58,
+	CSR12=0x60, CSR13=0x68, CSR14=0x70, CSR15=0x78 };
+
+/* The bits in the CSR5 status registers, mostly interrupt sources. */
+enum status_bits {
+	TimerInt=0x800, TPLnkFail=0x1000, TPLnkPass=0x10,
+	NormalIntr=0x10000, AbnormalIntr=0x8000, PCIBusError=0x2000,
+	RxJabber=0x200, RxStopped=0x100, RxNoBuf=0x80, RxIntr=0x40,
+	TxFIFOUnderflow=0x20, TxJabber=0x08, TxNoBuf=0x04, TxDied=0x02, TxIntr=0x01,
+};
+
+/* The configuration bits in CSR6. */
+enum csr6_mode_bits {
+	TxOn=0x2000, RxOn=0x0002, FullDuplex=0x0200,
+	AcceptBroadcast=0x0100, AcceptAllMulticast=0x0080,
+	AcceptAllPhys=0x0040, AcceptRunt=0x0008,
+};
+
+
+/* The Tulip Rx and Tx buffer descriptors.  Layout is fixed by the
+   hardware; 'status' is written by the chip, ownership via DescOwned. */
+struct tulip_rx_desc {
+	s32 status;
+	s32 length;
+	u32 buffer1, buffer2;
+};
+
+struct tulip_tx_desc {
+	s32 status;
+	s32 length;
+	u32 buffer1, buffer2;				/* We use only buffer 1.  */
+};
+
+enum desc_status_bits {
+	DescOwned=0x80000000, RxDescFatalErr=0x8000, RxWholePkt=0x0300,
+};
+
+/* Ring-wrap flag in length field, use for last ring entry.
+	0x01000000 means chain on buffer2 address,
+	0x02000000 means use the ring start address in CSR2/3.
+   Note: Some work-alike chips do not function correctly in chained mode.
+   The ASIX chip works only in chained mode.
+   Thus we indicate ring mode, but always write the 'next' field for
+   chained mode as well.
+*/
+#define DESC_RING_WRAP 0x02000000
+
+#define EEPROM_SIZE 512		/* support 256*16 EEPROMs */
+
+/* One parsed SROM media description. */
+struct medialeaf {
+	u8 type;							/* SROM block type (see block_name[]). */
+	u8 media;							/* Media code, index into media_cap[]. */
+	unsigned char *leafdata;			/* Points into tp->eeprom raw data. */
+};
+
+/* Parsed SROM media table; allocated with 'leafcount' trailing medialeaf
+   entries (see the kmalloc in parse_eeprom). */
+struct mediatable {
+	u16 defaultmedia;
+	u8 leafcount, csr12dir;				/* General purpose pin directions. */
+	unsigned has_mii:1, has_nonmii:1, has_reset:6;
+	u32 csr15dir, csr15val;				/* 21143 NWay setting. */
+	struct medialeaf mleaf[0];			/* Variable-length trailing array. */
+};
+
+struct mediainfo {
+	struct mediainfo *next;
+	int info_type;
+	int index;
+	unsigned char *info;
+};
+
+#define PRIV_ALIGN	15	/* Required alignment mask */
+/* Per-device driver state, aligned to PRIV_ALIGN+1 so the descriptor
+   rings at the front are quadword aligned. */
+struct tulip_private {
+	struct tulip_rx_desc rx_ring[RX_RING_SIZE];
+	struct tulip_tx_desc tx_ring[TX_RING_SIZE];
+	/* The saved addresses of Rx/Tx-in-place packet buffers. */
+	struct sk_buff* tx_skbuff[TX_RING_SIZE];
+	struct sk_buff* rx_skbuff[RX_RING_SIZE];
+	struct net_device *next_module;		/* Link in root_tulip_dev list. */
+	void *priv_addr;			/* Unaligned address of dev->priv for kfree */
+	u16 setup_frame[96];		/* Pseudo-Tx frame to init address table. */
+	u32 mc_filter[2];			/* Multicast hash filter */
+	struct pci_dev *pci_dev;
+	int chip_id, revision;		/* tulip_chips index and PCI revision ID. */
+	int flags;					/* tbl_flag bits | pci_id_tbl high drv_flags. */
+	int msg_level;				/* (1<<debug)-1, NETIF_MSG_* bitmask. */
+	struct timer_list timer;			/* Media selection timer. */
+	unsigned int csr0, csr6;			/* Current CSR0, CSR6 settings. */
+	/* Note: cache line pairing and isolation of Rx vs. Tx indicies. */
+	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
+	struct net_device_stats stats;
+	unsigned int cur_tx, dirty_tx;
+	unsigned int tx_full:1;				/* The Tx queue is full. */
+	unsigned int rx_dead:1;				/* We have no Rx buffers. */
+	unsigned int full_duplex:1;			/* Full-duplex operation requested. */
+	unsigned int full_duplex_lock:1;
+	unsigned int fake_addr:1;			/* Multiport board faked address. */
+	unsigned int media2:4;				/* Secondary monitored media port. */
+	unsigned int medialock:1;			/* Do not sense media type. */
+	unsigned int mediasense:1;			/* Media sensing in progress. */
+	unsigned int nway:1, nwayset:1;		/* 21143 internal NWay. */
+	unsigned int default_port:8;		/* Last dev->if_port value. */
+	unsigned char eeprom[EEPROM_SIZE];	/* Serial EEPROM contents. */
+	void (*link_change)(struct net_device *dev, int csr5);
+	u16 lpar;							/* 21143 Link partner ability. */
+	u16 sym_advertise, mii_advertise;	/* NWay to-advertise. */
+	u16 advertising[4];					/* MII advertise, from SROM table. */
+	signed char phys[4], mii_cnt;		/* MII device addresses. */
+	spinlock_t mii_lock;				/* Presumably guards mdio_read/write;
+										   usage not visible in this chunk. */
+	struct mediatable *mtable;
+	int cur_index;						/* Current media index. */
+	int saved_if_port;
+};
+
+/* Probe-time helpers. */
+static void start_link(struct net_device *dev);
+static void parse_eeprom(struct net_device *dev);
+static int read_eeprom(long ioaddr, int location, int addr_len);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int tulip_open(struct net_device *dev);
+/* Chip-specific media selection (timer functions prototyped above). */
+static int  check_duplex(struct net_device *dev);
+static void select_media(struct net_device *dev, int startup);
+static void init_media(struct net_device *dev);
+static void nway_lnk_change(struct net_device *dev, int csr5);
+static void nway_start(struct net_device *dev);
+static void pnic_lnk_change(struct net_device *dev, int csr5);
+static void pnic_do_nway(struct net_device *dev);
+
+/* net_device method implementations. */
+static void tulip_tx_timeout(struct net_device *dev);
+static void tulip_init_ring(struct net_device *dev);
+static int tulip_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int tulip_rx(struct net_device *dev);
+static void tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static int tulip_close(struct net_device *dev);
+static struct net_device_stats *tulip_get_stats(struct net_device *dev);
+#ifdef HAVE_PRIVATE_IOCTL
+static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+#endif
+static void set_rx_mode(struct net_device *dev);
+
+
+
+/* A list of all installed Tulip devices. */
+static struct net_device *root_tulip_dev = NULL;
+
+/* Probe and initialize one Tulip-family chip.
+   pdev/ioaddr/irq identify the already-located PCI device; pci_tbl_idx
+   indexes pci_id_tbl[]; find_cnt is the zero-based count of units found,
+   used to pick per-unit module options.  Wakes the chip, reads the station
+   address via the chip-specific EEPROM/register interface, applies board
+   quirks, and fills in the net_device methods.  Returns the net_device on
+   success, NULL on failure. */
+static void *tulip_probe1(struct pci_dev *pdev, void *init_dev,
+						  long ioaddr, int irq, int pci_tbl_idx, int find_cnt)
+{
+	struct net_device *dev;
+	struct tulip_private *tp;
+	void *priv_mem;
+	/* See note below on the multiport cards. */
+	static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'};
+	static int last_irq = 0;
+	static int multiport_cnt = 0;		/* For four-port boards w/one EEPROM */
+	u8 chip_rev;
+	int i, chip_idx = pci_id_tbl[pci_tbl_idx].drv_flags & 0xff;
+	unsigned short sum;
+	u8 ee_data[EEPROM_SIZE];
+
+	/* Bring the 21041/21143 out of sleep mode.
+	   Caution: Snooze mode does not work with some boards! */
+	if (tulip_tbl[chip_idx].flags & HAS_PWRDWN)
+		pci_write_config_dword(pdev, 0x40, 0x00000000);
+
+	if (inl(ioaddr + CSR5) == 0xffffffff) {
+		printk(KERN_ERR "The Tulip chip at %#lx is not functioning.\n", ioaddr);
+		return NULL;
+	}
+
+	dev = init_etherdev(init_dev, 0);
+	if (!dev)
+		return NULL;
+
+	/* Make certain the data structures are quadword aligned. */
+	priv_mem = kmalloc(sizeof(*tp) + PRIV_ALIGN, GFP_KERNEL);
+	/* Check for the very unlikely case of no memory. */
+	if (priv_mem == NULL)
+		return NULL;	/* NOTE(review): 'dev' from init_etherdev leaks here. */
+	dev->priv = tp = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+	memset(tp, 0, sizeof(*tp));
+	tp->mii_lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
+	tp->priv_addr = priv_mem;
+
+	tp->next_module = root_tulip_dev;
+	root_tulip_dev = dev;
+
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &chip_rev);
+
+	printk(KERN_INFO "%s: %s rev %d at %#3lx,",
+		   dev->name, pci_id_tbl[pci_tbl_idx].name, chip_rev, ioaddr);
+
+	/* Stop the Tx and Rx processes. */
+	outl(inl(ioaddr + CSR6) & ~TxOn & ~RxOn, ioaddr + CSR6);
+	/* Clear the missed-packet counter. */
+	inl(ioaddr + CSR8);
+
+	if (chip_idx == DC21041  &&  inl(ioaddr + CSR9) & 0x8000) {
+		printk(" 21040 compatible mode,");
+		chip_idx = DC21040;
+	}
+
+	/* The SROM/EEPROM interface varies dramatically. */
+	sum = 0;
+	if (chip_idx == DC21040) {
+		outl(0, ioaddr + CSR9);		/* Reset the pointer with a dummy write. */
+		for (i = 0; i < 6; i++) {
+			int value, boguscnt = 100000;
+			do				/* CSR9 bit 31 set means "not ready yet". */
+				value = inl(ioaddr + CSR9);
+			while (value < 0  && --boguscnt > 0);
+			dev->dev_addr[i] = value;
+			sum += value & 0xff;
+		}
+	} else if (chip_idx == LC82C168) {
+		for (i = 0; i < 3; i++) {
+			int value, boguscnt = 100000;
+			outl(0x600 | i, ioaddr + 0x98);
+			do
+				value = inl(ioaddr + CSR9);
+			while (value < 0  && --boguscnt > 0);
+			put_unaligned(le16_to_cpu(value), ((u16*)dev->dev_addr) + i);
+			sum += value & 0xffff;
+		}
+	} else if (chip_idx == COMET) {
+		/* No need to read the EEPROM. */
+		put_unaligned(le32_to_cpu(inl(ioaddr + 0xA4)), (u32 *)dev->dev_addr);
+		put_unaligned(le16_to_cpu(inl(ioaddr + 0xA8)),
+					  (u16 *)(dev->dev_addr + 4));
+		for (i = 0; i < 6; i ++)
+			sum += dev->dev_addr[i];
+	} else {
+		/* A serial EEPROM interface, we read now and sort it out later. */
+		int sa_offset = 0;
+		int ee_addr_size = read_eeprom(ioaddr, 0xff, 8) & 0x40000 ? 8 : 6;
+		int eeprom_word_cnt = 1 << ee_addr_size;
+
+		for (i = 0; i < eeprom_word_cnt; i++)
+			((u16 *)ee_data)[i] =
+				le16_to_cpu(read_eeprom(ioaddr, i, ee_addr_size));
+
+		/* DEC now has a specification (see Notes) but early board makers
+		   just put the address in the first EEPROM locations. */
+		/* This does  memcmp(eedata, eedata+16, 8) */
+		for (i = 0; i < 8; i ++)
+			if (ee_data[i] != ee_data[16+i])
+				sa_offset = 20;
+		if (chip_idx == CONEXANT) {
+			/* Check that the tuple type and length is correct. */
+			if (ee_data[0x198] == 0x04  &&  ee_data[0x199] == 6)
+				sa_offset = 0x19A;
+		} else if (ee_data[0] == 0xff  &&  ee_data[1] == 0xff &&
+				   ee_data[2] == 0) {
+			sa_offset = 2;		/* Grrr, damn Matrox boards. */
+			multiport_cnt = 4;
+		}
+		for (i = 0; i < 6; i ++) {
+			dev->dev_addr[i] = ee_data[i + sa_offset];
+			sum += ee_data[i + sa_offset];
+		}
+	}
+	/* Lite-On boards have the address byte-swapped. */
+	if ((dev->dev_addr[0] == 0xA0  ||  dev->dev_addr[0] == 0xC0)
+		&&  dev->dev_addr[1] == 0x00)
+		for (i = 0; i < 6; i+=2) {
+			char tmp = dev->dev_addr[i];
+			dev->dev_addr[i] = dev->dev_addr[i+1];
+			dev->dev_addr[i+1] = tmp;
+		}
+	/* On the Zynx 315 Etherarray and other multiport boards only the
+	   first Tulip has an EEPROM.
+	   The addresses of the subsequent ports are derived from the first.
+	   Many PCI BIOSes also incorrectly report the IRQ line, so we correct
+	   that here as well.
+	   NOTE(review): the 'sum == 6*0xff' blank-EEPROM check assumes byte
+	   sums; the LC82C168 path above sums 16-bit words — confirm. */
+	if (sum == 0  || sum == 6*0xff) {
+		printk(" EEPROM not present,");
+		for (i = 0; i < 5; i++)
+			dev->dev_addr[i] = last_phys_addr[i];
+		dev->dev_addr[i] = last_phys_addr[i] + 1;
+#if defined(__i386__)		/* Patch up x86 BIOS bug. */
+		if (last_irq)
+			irq = last_irq;
+#endif
+	}
+
+	for (i = 0; i < 6; i++)
+		printk("%c%2.2X", i ? ':' : ' ', last_phys_addr[i] = dev->dev_addr[i]);
+	printk(", IRQ %d.\n", irq);
+	last_irq = irq;
+
+	/* We do a request_region() to register /proc/ioports info. */
+	/* Bug fix: index pci_id_tbl[] with pci_tbl_idx, not chip_idx.  chip_idx
+	   is a tulip_chips enum value (and may have been remapped to DC21040
+	   above), so using it here read the wrong entry's io_size. */
+	request_region(ioaddr, pci_id_tbl[pci_tbl_idx].io_size, dev->name);
+
+	dev->base_addr = ioaddr;
+	dev->irq = irq;
+
+	tp->pci_dev = pdev;
+	tp->msg_level = (1 << debug) - 1;
+	tp->chip_id = chip_idx;
+	tp->revision = chip_rev;
+	tp->flags = tulip_tbl[chip_idx].flags
+		| (pci_id_tbl[pci_tbl_idx].drv_flags & 0xffffff00);
+	tp->csr0 = csr0;
+
+	/* BugFixes: The 21143-TD hangs with PCI Write-and-Invalidate cycles.
+	   And the ASIX must have a burst limit or horrible things happen. */
+	if (chip_idx == DC21143  &&  chip_rev == 65)
+		tp->csr0 &= ~0x01000000;
+	else if (tp->flags & IS_ASIX)
+		tp->csr0 |= 0x2000;
+
+	/* We support a zillion ways to set the media type. */
+#ifdef TULIP_FULL_DUPLEX
+	tp->full_duplex = 1;
+	tp->full_duplex_lock = 1;
+#endif
+#ifdef TULIP_DEFAULT_MEDIA
+	tp->default_port = TULIP_DEFAULT_MEDIA;
+#endif
+#ifdef TULIP_NO_MEDIA_SWITCH
+	tp->medialock = 1;
+#endif
+
+	/* The lower four bits are the media type. */
+	if (find_cnt >= 0  &&  find_cnt < MAX_UNITS) {
+		if (options[find_cnt] & 0x1f)
+			tp->default_port = options[find_cnt] & 0x1f;
+		if ((options[find_cnt] & 0x200) || full_duplex[find_cnt] > 0)
+			tp->full_duplex = 1;
+		if (mtu[find_cnt] > 0)
+			dev->mtu = mtu[find_cnt];
+	}
+	if (dev->mem_start)
+		tp->default_port = dev->mem_start & 0x1f;
+	if (tp->default_port) {
+		printk(KERN_INFO "%s: Transceiver selection forced to %s.\n",
+			   dev->name, medianame[tp->default_port & MEDIA_MASK]);
+		tp->medialock = 1;
+		if (media_cap[tp->default_port] & MediaAlwaysFD)
+			tp->full_duplex = 1;
+	}
+	if (tp->full_duplex)
+		tp->full_duplex_lock = 1;
+
+	if (media_cap[tp->default_port] & MediaIsMII) {
+		u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 };
+		tp->mii_advertise = media2advert[tp->default_port - 9];
+		tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
+	}
+
+	/* This is logically part of probe1(), but too complex to write inline. */
+	if (tp->flags & HAS_MEDIA_TABLE) {
+		memcpy(tp->eeprom, ee_data, sizeof(tp->eeprom));
+		parse_eeprom(dev);
+	}
+
+	/* The Tulip-specific entries in the device structure. */
+	dev->open = &tulip_open;
+	dev->hard_start_xmit = &tulip_start_xmit;
+	dev->stop = &tulip_close;
+	dev->get_stats = &tulip_get_stats;
+#ifdef HAVE_PRIVATE_IOCTL
+	dev->do_ioctl = &private_ioctl;
+#endif
+#ifdef HAVE_MULTICAST
+	dev->set_multicast_list = &set_rx_mode;
+#endif
+
+	if (tp->flags & HAS_NWAY)
+		tp->link_change = nway_lnk_change;
+	else if (tp->flags & HAS_PNICNWAY)
+		tp->link_change = pnic_lnk_change;
+	start_link(dev);
+	if (chip_idx == COMET) {
+		/* Set the Comet LED configuration. */
+		outl(0xf0000000, ioaddr + CSR9);
+	}
+
+	return dev;
+}
+
+/* Start the link, typically called at probe1() time but sometimes later with
+   multiport cards.
+   Scans for attached MII transceivers when the chip flags or the parsed
+   media table call for it, forces the MII advertising register when a
+   specific setting was requested, then issues the chip-specific serial
+   transceiver reset/start sequence, and finally drops the chip into its
+   powered state via PCI config register 0x40 when supported. */
+static void start_link(struct net_device *dev)
+{
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int i;
+
+	if ((tp->flags & ALWAYS_CHECK_MII) ||
+		(tp->mtable  &&  tp->mtable->has_mii) ||
+		( ! tp->mtable  &&  (tp->flags & HAS_MII))) {
+		int phyn, phy_idx = 0;
+		if (tp->mtable  &&  tp->mtable->has_mii) {
+			/* Select the first MII leaf (media code 11) so the MDIO
+			   interface is reachable, restoring if_port afterwards. */
+			for (i = 0; i < tp->mtable->leafcount; i++)
+				if (tp->mtable->mleaf[i].media == 11) {
+					tp->cur_index = i;
+					tp->saved_if_port = dev->if_port;
+					select_media(dev, 2);
+					dev->if_port = tp->saved_if_port;
+					break;
+				}
+		}
+		/* Find the connected MII xcvrs.
+		   Doing this in open() would allow detecting external xcvrs later,
+		   but takes much time.
+		   phyn runs 1..32 so that PHY address 0 (32 & 0x1f) is tried last. */
+		for (phyn = 1; phyn <= 32 && phy_idx < sizeof(tp->phys); phyn++) {
+			int phy = phyn & 0x1f;
+			int mii_status = mdio_read(dev, phy, 1);
+			if ((mii_status & 0x8301) == 0x8001 ||
+				((mii_status & 0x8000) == 0  && (mii_status & 0x7800) != 0)) {
+				int mii_reg0 = mdio_read(dev, phy, 0);
+				int mii_advert = mdio_read(dev, phy, 4);
+				int to_advert;
+
+				if (tp->mii_advertise)
+					to_advert = tp->mii_advertise;
+				else if (tp->advertising[phy_idx])
+					to_advert = tp->advertising[phy_idx];
+				else			/* Leave unchanged. */
+					tp->mii_advertise = to_advert = mii_advert;
+
+				tp->phys[phy_idx++] = phy;
+				printk(KERN_INFO "%s:  MII transceiver #%d "
+					   "config %4.4x status %4.4x advertising %4.4x.\n",
+					   dev->name, phy, mii_reg0, mii_status, mii_advert);
+				/* Fixup for DLink with miswired PHY. */
+				if (mii_advert != to_advert) {
+					printk(KERN_DEBUG "%s:  Advertising %4.4x on PHY %d,"
+						   " previously advertising %4.4x.\n",
+						   dev->name, to_advert, phy, mii_advert);
+					mdio_write(dev, phy, 4, to_advert);
+				}
+				/* Enable autonegotiation: some boards default to off. */
+				mdio_write(dev, phy, 0, (mii_reg0 & ~0x3000) |
+						   (tp->full_duplex ? 0x0100 : 0x0000) |
+						   ((media_cap[tp->default_port] & MediaIs100) ?
+							0x2000 : 0x1000));
+			}
+		}
+		tp->mii_cnt = phy_idx;
+		if (tp->mtable  &&  tp->mtable->has_mii  &&  phy_idx == 0) {
+			printk(KERN_INFO "%s: ***WARNING***: No MII transceiver found!\n",
+				   dev->name);
+			tp->phys[0] = 1;
+		}
+	}
+
+	/* Reset the xcvr interface and turn on heartbeat.
+	   The register write sequences below are chip-specific and
+	   order-sensitive; do not reorder. */
+	switch (tp->chip_id) {
+	case DC21040:
+		outl(0x00000000, ioaddr + CSR13);
+		outl(0x00000004, ioaddr + CSR13);
+		break;
+	case DC21041:
+		/* This is nway_start(). */
+		if (tp->sym_advertise == 0)
+			tp->sym_advertise = 0x0061;
+		outl(0x00000000, ioaddr + CSR13);
+		outl(0xFFFFFFFF, ioaddr + CSR14);
+		outl(0x00000008, ioaddr + CSR15); /* Listen on AUI also. */
+		outl(inl(ioaddr + CSR6) | FullDuplex, ioaddr + CSR6);
+		outl(0x0000EF01, ioaddr + CSR13);
+		break;
+	case DC21140: default:
+		if (tp->mtable)
+			outl(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
+		break;
+	case DC21142:
+	case PNIC2:
+		if (tp->mii_cnt  ||  media_cap[dev->if_port] & MediaIsMII) {
+			outl(0x82020000, ioaddr + CSR6);
+			outl(0x0000, ioaddr + CSR13);
+			outl(0x0000, ioaddr + CSR14);
+			outl(0x820E0000, ioaddr + CSR6);
+		} else
+			nway_start(dev);
+		break;
+	case LC82C168:
+		if ( ! tp->mii_cnt) {
+			tp->nway = 1;
+			tp->nwayset = 0;
+			outl(0x00420000, ioaddr + CSR6);
+			outl(0x30, ioaddr + CSR12);
+			outl(0x0001F078, ioaddr + 0xB8);
+			outl(0x0201F078, ioaddr + 0xB8); /* Turn on autonegotiation. */
+		}
+		break;
+	case MX98713: case COMPEX9881:
+		outl(0x00000000, ioaddr + CSR6);
+		outl(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
+		outl(0x00000001, ioaddr + CSR13);
+		break;
+	case MX98715: case MX98725:
+		outl(0x01a80000, ioaddr + CSR6);
+		outl(0xFFFFFFFF, ioaddr + CSR14);
+		outl(0x00001000, ioaddr + CSR12);
+		break;
+	case COMET:
+		break;
+	}
+
+	if (tp->flags & HAS_PWRDWN)
+		pci_write_config_dword(tp->pci_dev, 0x40, 0x40000000);
+}
+
+
+/* Serial EEPROM section. */
+/* The main routine to parse the very complicated SROM structure.
+   Search www.digital.com for "21X4 SROM" to get details.
+   This code is very complex, and will require changes to support
+   additional cards, so I will be verbose about what is going on.
+   */
+
+/* Known cards that have old-style EEPROMs.
+   Writing this table is described at
+   http://www.scyld.com/network/tulip-media.html
+   Each entry gives the first three station-address bytes (vendor OUI) to
+   match, and a replacement 21140-format media table that parse_eeprom()
+   copies over ee_data+26 before parsing. */
+static struct fixups {
+  char *name;
+  unsigned char addr0, addr1, addr2;
+  u16 newtable[32];				/* Max length below. */
+} eeprom_fixups[] = {
+  {"Asante", 0, 0, 0x94, {0x1e00, 0x0000, 0x0800, 0x0100, 0x018c,
+						  0x0000, 0x0000, 0xe078, 0x0001, 0x0050, 0x0018 }},
+  {"SMC9332DST", 0, 0, 0xC0, { 0x1e00, 0x0000, 0x0800, 0x041f,
+							   0x0000, 0x009E, /* 10baseT */
+							   0x0004, 0x009E, /* 10baseT-FD */
+							   0x0903, 0x006D, /* 100baseTx */
+							   0x0905, 0x006D, /* 100baseTx-FD */ }},
+  {"Cogent EM100", 0, 0, 0x92, { 0x1e00, 0x0000, 0x0800, 0x063f,
+								 0x0107, 0x8021, /* 100baseFx */
+								 0x0108, 0x8021, /* 100baseFx-FD */
+								 0x0100, 0x009E, /* 10baseT */
+								 0x0104, 0x009E, /* 10baseT-FD */
+								 0x0103, 0x006D, /* 100baseTx */
+								 0x0105, 0x006D, /* 100baseTx-FD */ }},
+  {"Maxtech NX-110", 0, 0, 0xE8, { 0x1e00, 0x0000, 0x0800, 0x0513,
+							   0x1001, 0x009E, /* 10base2, CSR12 0x10*/
+							   0x0000, 0x009E, /* 10baseT */
+							   0x0004, 0x009E, /* 10baseT-FD */
+							   0x0303, 0x006D, /* 100baseTx, CSR12 0x03 */
+							   0x0305, 0x006D, /* 100baseTx-FD CSR12 0x03 */}},
+  {"Accton EN1207", 0, 0, 0xE8, { 0x1e00, 0x0000, 0x0800, 0x051F,
+								  0x1B01, 0x0000, /* 10base2,   CSR12 0x1B */
+								  0x0B00, 0x009E, /* 10baseT,   CSR12 0x0B */
+								  0x0B04, 0x009E, /* 10baseT-FD,CSR12 0x0B */
+								  0x1B03, 0x006D, /* 100baseTx, CSR12 0x1B */
+								  0x1B05, 0x006D, /* 100baseTx-FD CSR12 0x1B */
+   }},
+  {0, 0, 0, 0, {}}};
+
+/* Human-readable names for SROM media block types (medialeaf.type). */
+static const char * block_name[] = {"21140 non-MII", "21140 MII PHY",
+ "21142 Serial PHY", "21142 MII PHY", "21143 SYM PHY", "21143 reset method"};
+
+/* Read a little-endian u16 from a possibly unaligned pointer into the
+   SROM image.  x86 tolerates the direct unaligned load; other
+   architectures assemble the value byte-by-byte. */
+#if defined(__i386__)			/* AKA get_unaligned() */
+#define get_u16(ptr) (*(u16 *)(ptr))
+#else
+#define get_u16(ptr) (((u8*)(ptr))[0] + (((u8*)(ptr))[1]<<8))
+#endif
+
+/* Parse the SROM media information in tp->eeprom into tp->mtable.
+   Handles three layouts: old-style (station-address-only) EEPROMs, which
+   are patched from eeprom_fixups[]; multiport boards where only the first
+   port has an EEPROM and the parsed media table is shared between ports
+   (via the function-local statics below); and the standard DEC 21041 /
+   21140+ formats.  Also accumulates NWay advertising bits for the media
+   types listed, stored into tp->sym_advertise. */
+static void parse_eeprom(struct net_device *dev)
+{
+	/* The last media info list parsed, for multiport boards.  */
+	static struct mediatable *last_mediatable = NULL;
+	static unsigned char *last_ee_data = NULL;
+	static int controller_index = 0;
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	unsigned char *p, *ee_data = tp->eeprom;
+	int new_advertise = 0;
+	int i;
+
+	tp->mtable = 0;
+	/* Detect an old-style (SA only) EEPROM layout:
+	   memcmp(eedata, eedata+16, 8). */
+	for (i = 0; i < 8; i ++)
+		if (ee_data[i] != ee_data[16+i])
+			break;
+	if (i >= 8) {
+		if (ee_data[0] == 0xff) {
+			/* Blank EEPROM: a subsequent port of a multiport board, or a
+			   genuinely missing EEPROM. */
+			if (last_mediatable) {
+				controller_index++;
+				printk(KERN_INFO "%s:  Controller %d of multiport board.\n",
+					   dev->name, controller_index);
+				tp->mtable = last_mediatable;
+				ee_data = last_ee_data;
+				goto subsequent_board;
+			} else
+				printk(KERN_INFO "%s:  Missing EEPROM, this interface may "
+					   "not work correctly!\n",
+					   dev->name);
+			return;
+		}
+		/* Do a fix-up based on the vendor half of the station address. */
+		for (i = 0; eeprom_fixups[i].name; i++) {
+			if (dev->dev_addr[0] == eeprom_fixups[i].addr0
+				&&  dev->dev_addr[1] == eeprom_fixups[i].addr1
+				&&  dev->dev_addr[2] == eeprom_fixups[i].addr2) {
+				if (dev->dev_addr[2] == 0xE8  &&  ee_data[0x1a] == 0x55)
+					i++;		/* An Accton EN1207, not an outlaw Maxtech. */
+				memcpy(ee_data + 26, eeprom_fixups[i].newtable,
+					   sizeof(eeprom_fixups[i].newtable));
+				printk(KERN_INFO "%s: Old format EEPROM on '%s' board.\n"
+					   KERN_INFO "%s: Using substitute media control info.\n",
+					   dev->name, eeprom_fixups[i].name, dev->name);
+				break;
+			}
+		}
+		if (eeprom_fixups[i].name == NULL) { /* No fixup found. */
+			printk(KERN_INFO "%s: Old style EEPROM with no media selection "
+				   "information.\n",
+				   dev->name);
+			return;
+		}
+	}
+
+	controller_index = 0;
+	if (ee_data[19] > 1) {		/* Byte 19 is the controller count. */
+		struct net_device *prev_dev;
+		struct tulip_private *otp;
+		/* This is a multiport board.  The probe order may be "backwards", so
+		   we patch up already found devices. */
+		last_ee_data = ee_data;
+		for (prev_dev = tp->next_module; prev_dev; prev_dev = otp->next_module) {
+			otp = (struct tulip_private *)prev_dev->priv;
+			if (otp->eeprom[0] == 0xff  &&  otp->mtable == 0) {
+				parse_eeprom(prev_dev);
+				start_link(prev_dev);
+			} else
+				break;
+		}
+		controller_index = 0;
+	}
+subsequent_board:
+
+	/* Byte 27 (+3 per controller) holds the offset of this port's
+	   media table within the EEPROM image. */
+	p = (void *)ee_data + ee_data[27 + controller_index*3];
+	if (ee_data[27] == 0) {		/* No valid media table. */
+	} else if (tp->chip_id == DC21041) {
+		int media = get_u16(p);
+		int count = p[2];
+		p += 3;
+
+		printk(KERN_INFO "%s: 21041 Media table, default media %4.4x (%s).\n",
+			   dev->name, media,
+			   media & 0x0800 ? "Autosense" : medianame[media & MEDIA_MASK]);
+		for (i = 0; i < count; i++) {
+			unsigned char media_block = *p++;
+			int media_code = media_block & MEDIA_MASK;
+			if (media_block & 0x40)		/* Block carries 3 CSR values. */
+				p += 6;
+			switch(media_code) {
+			case 0: new_advertise |= 0x0020; break;
+			case 4: new_advertise |= 0x0040; break;
+			}
+			printk(KERN_INFO "%s:  21041 media #%d, %s.\n",
+				   dev->name, media_code, medianame[media_code]);
+		}
+	} else {
+		unsigned char csr12dir = 0;
+		int count;
+		struct mediatable *mtable;
+		u16 media = get_u16(p);
+
+		p += 2;
+		if (tp->flags & CSR12_IN_SROM)
+			csr12dir = *p++;
+		count = *p++;
+		mtable = (struct mediatable *)
+			kmalloc(sizeof(struct mediatable) + count*sizeof(struct medialeaf),
+					GFP_KERNEL);
+		if (mtable == NULL)
+			return;				/* Horrible, impossible failure. */
+		last_mediatable = tp->mtable = mtable;
+		mtable->defaultmedia = media;
+		mtable->leafcount = count;
+		mtable->csr12dir = csr12dir;
+		mtable->has_nonmii = mtable->has_mii = mtable->has_reset = 0;
+		mtable->csr15dir = mtable->csr15val = 0;
+
+		printk(KERN_INFO "%s:  EEPROM default media type %s.\n", dev->name,
+			   media & 0x0800 ? "Autosense" : medianame[media & MEDIA_MASK]);
+		for (i = 0; i < count; i++) {
+			struct medialeaf *leaf = &mtable->mleaf[i];
+
+			if ((p[0] & 0x80) == 0) { /* 21140 Compact block. */
+				leaf->type = 0;
+				leaf->media = p[0] & 0x3f;
+				leaf->leafdata = p;
+				if ((p[2] & 0x61) == 0x01)	/* Bogus, but Znyx boards do it. */
+					mtable->has_mii = 1;
+				p += 4;
+			} else {
+				switch(leaf->type = p[1]) {
+				case 5:
+					mtable->has_reset = i + 1; /* Assure non-zero */
+					/* Fall through */
+				case 6:
+					leaf->media = 31;
+					break;
+				case 1: case 3:
+					mtable->has_mii = 1;
+					leaf->media = 11;
+					break;
+				case 2:
+					if ((p[2] & 0x3f) == 0) {
+						u32 base15 = (p[2] & 0x40) ? get_u16(p + 7) : 0x0008;
+						u16 *p1 = (u16 *)(p + (p[2] & 0x40 ? 9 : 3));
+						mtable->csr15dir = (get_unaligned(p1 + 0)<<16) + base15;
+						mtable->csr15val = (get_unaligned(p1 + 1)<<16) + base15;
+					}
+					/* Fall through. */
+				case 0: case 4:
+					mtable->has_nonmii = 1;
+					leaf->media = p[2] & MEDIA_MASK;
+					switch (leaf->media) {
+					case 0: new_advertise |= 0x0020; break;
+					case 4: new_advertise |= 0x0040; break;
+					case 3: new_advertise |= 0x0080; break;
+					case 5: new_advertise |= 0x0100; break;
+					case 6: new_advertise |= 0x0200; break;
+					}
+					break;
+				default:
+					leaf->media = 19;
+				}
+				leaf->leafdata = p + 2;
+				p += (p[0] & 0x3f) + 1;
+			}
+			if ((tp->msg_level & NETIF_MSG_LINK) &&
+				leaf->media == 11) {
+				unsigned char *bp = leaf->leafdata;
+				printk(KERN_INFO "%s:  MII interface PHY %d, setup/reset "
+					   "sequences %d/%d long, capabilities %2.2x %2.2x.\n",
+					   dev->name, bp[0], bp[1], bp[2 + bp[1]*2],
+					   bp[5 + bp[2 + bp[1]*2]*2], bp[4 + bp[2 + bp[1]*2]*2]);
+			}
+			if (tp->msg_level & NETIF_MSG_PROBE)
+				printk(KERN_INFO "%s:  Index #%d - Media %s (#%d) described "
+					   "by a %s (%d) block.\n",
+					   dev->name, i, medianame[leaf->media], leaf->media,
+					   leaf->type < 6 ? block_name[leaf->type] : "UNKNOWN",
+					   leaf->type);
+		}
+		if (new_advertise)
+			tp->sym_advertise = new_advertise;
+	}
+}
+/* Reading a serial EEPROM is a "bit" grungy, but we work our way through:->.*/
+
+/*  EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK	0x02	/* EEPROM shift clock. */
+#define EE_CS			0x01	/* EEPROM chip select. */
+#define EE_DATA_WRITE	0x04	/* Data from the Tulip to EEPROM. */
+#define EE_WRITE_0		0x01
+#define EE_WRITE_1		0x05
+#define EE_DATA_READ	0x08	/* Data from the EEPROM chip. */
+#define EE_ENB			(0x4800 | EE_CS)
+
+/* Delay between EEPROM clock transitions.
+   Even at 33Mhz current PCI implementations do not overrun the EEPROM clock.
+   We add a bus turn-around to insure that this remains true. */
+#define eeprom_delay()	inl(ee_addr)
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_READ_CMD		(6)
+
+/* Note: this routine returns extra data bits for size detection. */
+static int read_eeprom(long ioaddr, int location, int addr_len)
+{
+	long ee_addr = ioaddr + CSR9;	/* The eeprom_delay() macro expects this name. */
+	int cmd = location | (EE_READ_CMD << addr_len);
+	unsigned bits = 0;
+	int bit;
+
+	/* Select the EEPROM: raise chip select. */
+	outl(EE_ENB & ~EE_CS, ee_addr);
+	outl(EE_ENB, ee_addr);
+
+	/* Clock out the read opcode and address, most significant bit first. */
+	for (bit = 4 + addr_len; bit >= 0; bit--) {
+		short level = (cmd & (1 << bit)) ? EE_DATA_WRITE : 0;
+		outl(EE_ENB | level, ee_addr);
+		eeprom_delay();
+		outl(EE_ENB | level | EE_SHIFT_CLK, ee_addr);
+		eeprom_delay();
+		/* Sample the data-out pin; extra bits help detect the EEPROM size. */
+		bits = (bits << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
+	}
+	outl(EE_ENB, ee_addr);
+	eeprom_delay();
+
+	/* Clock in the 16 data bits. */
+	for (bit = 16; bit > 0; bit--) {
+		outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
+		eeprom_delay();
+		bits = (bits << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
+		outl(EE_ENB, ee_addr);
+		eeprom_delay();
+	}
+
+	/* Terminate the EEPROM access: drop chip select. */
+	outl(EE_ENB & ~EE_CS, ee_addr);
+	return bits;
+}
+
+/* MII transceiver control section.
+   Read and write the MII registers using software-generated serial
+   MDIO protocol.  See the MII specifications or DP83840A data sheet
+   for details. */
+
+/* The maximum data clock rate is 2.5 Mhz.  The minimum timing is usually
+   met by back-to-back PCI I/O cycles, but we insert a delay to avoid
+   "overclocking" issues or future 66Mhz PCI. */
+#define mdio_delay() inl(mdio_addr)
+
+/* Read and write the MII registers using software-generated serial
+   MDIO protocol.  It is just different enough from the EEPROM protocol
+   to not share code.  The maximum data clock rate is 2.5 MHz. */
+#define MDIO_SHIFT_CLK	0x10000
+#define MDIO_DATA_WRITE0 0x00000
+#define MDIO_DATA_WRITE1 0x20000
+#define MDIO_ENB		0x00000		/* Ignore the 0x02000 databook setting. */
+#define MDIO_ENB_IN		0x40000
+#define MDIO_DATA_READ	0x80000
+
+/* Map MII register numbers 0-31 to Comet CSR byte offsets.  A zero entry
+   means that MII register is not memory-mapped on the Comet, and the
+   mdio_read()/mdio_write() callers treat it as unimplemented. */
+static const unsigned char comet_miireg2offset[32] = {
+	0xB4, 0xB8, 0xBC, 0xC0,  0xC4, 0xC8, 0xCC, 0,  0,0,0,0,  0,0,0,0,
+	0,0xD0,0,0,  0,0,0,0,  0,0,0,0, 0, 0xD4, 0xD8, 0xDC, };
+
+/* Read MII register LOCATION of transceiver PHY_ID on DEV.
+   Returns the 16 bit register value, or 0xffff for an out-of-range
+   location or an unmapped Comet pseudo-PHY register. */
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	int i;
+	/* MDIO read frame: start/opcode bits (0xf6 field), 5 bit PHY
+	   address, 5 bit register address.  phy_id is masked to 5 bits. */
+	int read_cmd = (0xf6 << 10) | ((phy_id & 0x1f) << 5) | location;
+	int retval = 0;
+	long ioaddr = dev->base_addr;
+	long mdio_addr = ioaddr + CSR9;
+	unsigned long flags;
+
+	if (location & ~0x1f)
+		return 0xffff;
+
+	/* The Comet exposes pseudo-PHY 30 as memory-mapped registers. */
+	if (tp->chip_id == COMET  &&  phy_id == 30) {
+		if (comet_miireg2offset[location])
+			return inl(ioaddr + comet_miireg2offset[location]);
+		return 0xffff;
+	}
+
+	spin_lock_irqsave(&tp->mii_lock, flags);
+	if (tp->chip_id == LC82C168) {
+		/* The PNIC has a hardware MDIO engine at offset 0xA0: start the
+		   read, then poll until the busy bit (bit 31) clears. */
+		int i = 1000;
+		outl(0x60020000 + (phy_id<<23) + (location<<18), ioaddr + 0xA0);
+		inl(ioaddr + 0xA0);
+		inl(ioaddr + 0xA0);
+		inl(ioaddr + 0xA0);
+		inl(ioaddr + 0xA0);
+		while (--i > 0)
+			if ( ! ((retval = inl(ioaddr + 0xA0)) & 0x80000000))
+				break;
+		spin_unlock_irqrestore(&tp->mii_lock, flags);
+		return retval & 0xffff;
+	}
+
+	/* Establish sync by sending at least 32 logic ones. */
+	for (i = 32; i >= 0; i--) {
+		outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
+		mdio_delay();
+		outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+		mdio_delay();
+	}
+	/* Shift the read command bits out. */
+	for (i = 15; i >= 0; i--) {
+		int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
+
+		outl(MDIO_ENB | dataval, mdio_addr);
+		mdio_delay();
+		outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
+		mdio_delay();
+	}
+	/* Read the two transition, 16 data, and wire-idle bits. */
+	for (i = 19; i > 0; i--) {
+		outl(MDIO_ENB_IN, mdio_addr);
+		mdio_delay();
+		retval = (retval << 1) | ((inl(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
+		outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+		mdio_delay();
+	}
+	spin_unlock_irqrestore(&tp->mii_lock, flags);
+	/* Drop the trailing idle bit and mask down to the 16 data bits. */
+	return (retval>>1) & 0xffff;
+}
+
+/* Write VAL to MII register LOCATION of transceiver PHY_ID on DEV.
+   Out-of-range register numbers are silently ignored, matching the
+   0xffff failure convention of mdio_read(). */
+static void mdio_write(struct net_device *dev, int phy_id, int location, int val)
+{
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	int i;
+	/* MDIO write frame: start/opcode (0x5), PHY address, register
+	   address, turnaround (0x2), 16 data bits.  Mask phy_id to 5 bits
+	   so a bogus id cannot corrupt the opcode bits -- mdio_read()
+	   already applies the same mask when building its frame. */
+	int cmd = (0x5002 << 16) | ((phy_id & 0x1f) << 23) | (location<<18) | (val & 0xffff);
+	long ioaddr = dev->base_addr;
+	long mdio_addr = ioaddr + CSR9;
+	unsigned long flags;
+
+	if (location & ~0x1f)
+		return;
+
+	/* The Comet exposes pseudo-PHY 30 as memory-mapped registers. */
+	if (tp->chip_id == COMET  &&  phy_id == 30) {
+		if (comet_miireg2offset[location])
+			outl(val, ioaddr + comet_miireg2offset[location]);
+		return;
+	}
+
+	spin_lock_irqsave(&tp->mii_lock, flags);
+	if (tp->chip_id == LC82C168) {
+		/* PNIC hardware MDIO engine: start the cycle, then poll until
+		   the busy bit (bit 31) clears. */
+		int i = 1000;
+		outl(cmd, ioaddr + 0xA0);
+		do
+			if ( ! (inl(ioaddr + 0xA0) & 0x80000000))
+				break;
+		while (--i > 0);
+		spin_unlock_irqrestore(&tp->mii_lock, flags);
+		return;
+	}
+
+	/* Establish sync by sending 32 logic ones. */
+	for (i = 32; i >= 0; i--) {
+		outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
+		mdio_delay();
+		outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+		mdio_delay();
+	}
+	/* Shift the command bits out. */
+	for (i = 31; i >= 0; i--) {
+		int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
+		outl(MDIO_ENB | dataval, mdio_addr);
+		mdio_delay();
+		outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
+		mdio_delay();
+	}
+	/* Clear out extra bits. */
+	for (i = 2; i > 0; i--) {
+		outl(MDIO_ENB_IN, mdio_addr);
+		mdio_delay();
+		outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+		mdio_delay();
+	}
+	spin_unlock_irqrestore(&tp->mii_lock, flags);
+	return;
+}
+
+
+/* netdev open() method: wake and reset the chip, claim the IRQ,
+   initialize the descriptor rings and station address, select the media,
+   start the Tx/Rx engines and arm the media-monitor timer.
+   Returns 0 on success or -EAGAIN if the interrupt cannot be claimed. */
+static int
+tulip_open(struct net_device *dev)
+{
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int next_tick = 3*HZ;		/* Delay until the first media-timer tick. */
+
+	/* Wake the chip from sleep/snooze mode. */
+	if (tp->flags & HAS_PWRDWN)
+		pci_write_config_dword(tp->pci_dev, 0x40, 0);
+
+	/* On some chip revs we must set the MII/SYM port before the reset!? */
+	if (tp->mii_cnt  ||  (tp->mtable  &&  tp->mtable->has_mii))
+		outl(0x00040000, ioaddr + CSR6);
+
+	/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
+	outl(0x00000001, ioaddr + CSR0);
+
+	MOD_INC_USE_COUNT;
+
+	/* This would be done after interrupts are initialized, but we do not want
+	   to frob the transceiver only to fail later. */
+	if (request_irq(dev->irq, &tulip_interrupt, SA_SHIRQ, dev->name, dev)) {
+		MOD_DEC_USE_COUNT;
+		return -EAGAIN;
+	}
+
+	/* Deassert reset.
+	   Wait the specified 50 PCI cycles after a reset by initializing
+	   Tx and Rx queues and the address filter list. */
+	outl(tp->csr0, ioaddr + CSR0);
+
+	if (tp->msg_level & NETIF_MSG_IFUP)
+		printk(KERN_DEBUG "%s: tulip_open() irq %d.\n", dev->name, dev->irq);
+
+	tulip_init_ring(dev);
+
+	if (tp->chip_id == PNIC2) {
+		u32 addr_high = (dev->dev_addr[1]<<8) + (dev->dev_addr[0]<<0);
+		/* This address setting does not appear to impact chip operation?? */
+		outl((dev->dev_addr[5]<<8) + dev->dev_addr[4] +
+			 (dev->dev_addr[3]<<24) + (dev->dev_addr[2]<<16),
+			 ioaddr + 0xB0);
+		outl(addr_high + (addr_high<<16), ioaddr + 0xB8);
+	}
+	/* Hash-filter-only chips take the station address through registers
+	   (ASIX via CSR13/14 indirection, Comet via dedicated CSRs) rather
+	   than a setup frame. */
+	if (tp->flags & MC_HASH_ONLY) {
+		u32 addr_low = cpu_to_le32(get_unaligned((u32 *)dev->dev_addr));
+		u32 addr_high = cpu_to_le16(get_unaligned((u16 *)(dev->dev_addr+4)));
+		if (tp->flags & IS_ASIX) {
+			outl(0, ioaddr + CSR13);
+			outl(addr_low,  ioaddr + CSR14);
+			outl(1, ioaddr + CSR13);
+			outl(addr_high, ioaddr + CSR14);
+		} else if (tp->flags & COMET_MAC_ADDR) {
+			outl(addr_low,  ioaddr + 0xA4);
+			outl(addr_high, ioaddr + 0xA8);
+			outl(0, ioaddr + 0xAC);
+			outl(0, ioaddr + 0xB0);
+		}
+	}
+
+	/* Point the chip at the Rx and Tx descriptor rings. */
+	outl(virt_to_bus(tp->rx_ring), ioaddr + CSR3);
+	outl(virt_to_bus(tp->tx_ring), ioaddr + CSR4);
+
+	if ( ! tp->full_duplex_lock)
+		tp->full_duplex = 0;
+	init_media(dev);
+	if (media_cap[dev->if_port] & MediaIsMII)
+		check_duplex(dev);
+	set_rx_mode(dev);
+
+	/* Start the Tx to process setup frame. */
+	outl(tp->csr6, ioaddr + CSR6);
+	outl(tp->csr6 | TxOn, ioaddr + CSR6);
+
+	netif_start_tx_queue(dev);
+
+	/* Enable interrupts by setting the interrupt mask. */
+	outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
+	outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+	outl(tp->csr6 | TxOn | RxOn, ioaddr + CSR6);
+	outl(0, ioaddr + CSR2);		/* Rx poll demand */
+
+	if (tp->msg_level & NETIF_MSG_IFUP)
+		printk(KERN_DEBUG "%s: Done tulip_open(), CSR0 %8.8x, CSR5 %8.8x CSR6 "
+			   "%8.8x.\n", dev->name, (int)inl(ioaddr + CSR0),
+			   (int)inl(ioaddr + CSR5), (int)inl(ioaddr + CSR6));
+
+	/* Set the timer to switch to check for link beat and perhaps switch
+	   to an alternate media type. */
+	init_timer(&tp->timer);
+	tp->timer.expires = jiffies + next_tick;
+	tp->timer.data = (unsigned long)dev;
+	tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
+	add_timer(&tp->timer);
+
+	return 0;
+}
+
+/* Choose and program the initial media for DEV: honor a user-forced port
+   if the EEPROM media table supports it, else the EEPROM default media,
+   else start autosensing from the last non-full-duplex table entry.
+   Finishes with chip-specific media startup (NWay, MII, or fixed port). */
+static void init_media(struct net_device *dev)
+{
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int i;
+
+	tp->saved_if_port = dev->if_port;
+	if (dev->if_port == 0)
+		dev->if_port = tp->default_port;
+
+	/* Allow selecting a default media. */
+	i = 0;
+	if (tp->mtable == NULL)
+		goto media_picked;
+	if (dev->if_port) {
+		/* Map MII media types to leaf code 11, and port 12 to 0. */
+		int looking_for = media_cap[dev->if_port] & MediaIsMII ? 11 :
+			(dev->if_port == 12 ? 0 : dev->if_port);
+		for (i = 0; i < tp->mtable->leafcount; i++)
+			if (tp->mtable->mleaf[i].media == looking_for) {
+				printk(KERN_INFO "%s: Using user-specified media %s.\n",
+					   dev->name, medianame[dev->if_port]);
+				goto media_picked;
+			}
+	}
+	/* Bit 0x0800 in the EEPROM default media word means "autosense". */
+	if ((tp->mtable->defaultmedia & 0x0800) == 0) {
+		int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
+		for (i = 0; i < tp->mtable->leafcount; i++)
+			if (tp->mtable->mleaf[i].media == looking_for) {
+				printk(KERN_INFO "%s: Using EEPROM-set media %s.\n",
+					   dev->name, medianame[looking_for]);
+				goto media_picked;
+			}
+	}
+	/* Start sensing first non-full-duplex media. */
+	for (i = tp->mtable->leafcount - 1;
+		 (media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
+		;
+media_picked:
+	/* Here i indexes the chosen media-table leaf (0 if no table). */
+	tp->csr6 = 0;
+	tp->cur_index = i;
+	tp->nwayset = 0;
+
+	if (dev->if_port) {
+		if (tp->chip_id == DC21143  &&
+			(media_cap[dev->if_port] & MediaIsMII)) {
+			/* We must reset the media CSRs when we force-select MII mode. */
+			outl(0x0000, ioaddr + CSR13);
+			outl(0x0000, ioaddr + CSR14);
+			outl(0x0008, ioaddr + CSR15);
+		}
+		select_media(dev, 1);
+		return;
+	}
+	/* No forced port: apply the chip-specific startup sequence. */
+	switch(tp->chip_id) {
+	case DC21041:
+		/* tp->nway = 1;*/
+		nway_start(dev);
+		break;
+	case DC21142:
+		if (tp->mii_cnt) {
+			select_media(dev, 1);
+			if (tp->msg_level & NETIF_MSG_LINK)
+				printk(KERN_INFO "%s: Using MII transceiver %d, status "
+					   "%4.4x.\n",
+					   dev->name, tp->phys[0], mdio_read(dev, tp->phys[0], 1));
+			outl(0x82020000, ioaddr + CSR6);
+			tp->csr6 = 0x820E0000;
+			dev->if_port = 11;
+			outl(0x0000, ioaddr + CSR13);
+			outl(0x0000, ioaddr + CSR14);
+		} else
+			nway_start(dev);
+		break;
+	case PNIC2:
+		nway_start(dev);
+		break;
+	case LC82C168:
+		if (tp->mii_cnt) {
+			dev->if_port = 11;
+			tp->csr6 = 0x814C0000 | (tp->full_duplex ? FullDuplex : 0);
+			outl(0x0001, ioaddr + CSR15);
+		} else if (inl(ioaddr + CSR5) & TPLnkPass)
+			pnic_do_nway(dev);
+		else {
+			/* Start with 10mbps to do autonegotiation. */
+			outl(0x32, ioaddr + CSR12);
+			tp->csr6 = 0x00420000;
+			outl(0x0001B078, ioaddr + 0xB8);
+			outl(0x0201B078, ioaddr + 0xB8);
+		}
+		break;
+	case MX98713: case COMPEX9881:
+		dev->if_port = 0;
+		tp->csr6 = 0x01880000 | (tp->full_duplex ? FullDuplex : 0);
+		outl(0x0f370000 | inw(ioaddr + 0x80), ioaddr + 0x80);
+		break;
+	case MX98715: case MX98725:
+		/* Provided by BOLO, Macronix - 12/10/1998. */
+		dev->if_port = 0;
+		tp->csr6 = 0x01a80000 | FullDuplex;
+		outl(0x0f370000 | inw(ioaddr + 0x80), ioaddr + 0x80);
+		outl(0x11000 | inw(ioaddr + 0xa0), ioaddr + 0xa0);
+		break;
+	case COMET: case CONEXANT:
+		/* Enable automatic Tx underrun recovery. */
+		outl(inl(ioaddr + 0x88) | 1, ioaddr + 0x88);
+		dev->if_port = tp->mii_cnt ? 11 : 0;
+		tp->csr6 = 0x00040000;
+		break;
+	case AX88140: case AX88141:
+		tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
+		break;
+	default:
+		select_media(dev, 1);
+	}
+}
+
+/* Set up the transceiver control registers for the selected media type.
+   STARTUP indicates to reset the transceiver.  It is set to '2' for
+   the initial card detection, and '1' during resume or open().
+*/
+/* Program the transceiver CSRs for the media currently selected in
+   dev->if_port / tp->cur_index, interpreting the EEPROM media table
+   when one exists, else falling back to per-chip defaults.
+   STARTUP non-zero means also reset/reinitialize the transceiver. */
+static void select_media(struct net_device *dev, int startup)
+{
+	long ioaddr = dev->base_addr;
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	struct mediatable *mtable = tp->mtable;
+	u32 new_csr6;
+	int i;
+
+	if (mtable) {
+		struct medialeaf *mleaf = &mtable->mleaf[tp->cur_index];
+		unsigned char *p = mleaf->leafdata;
+		if (tp->msg_level & NETIF_MSG_LINK)
+			printk(KERN_DEBUG "%s:  Media table type %d.\n",
+				   dev->name, mleaf->type);
+		switch (mleaf->type) {
+		case 0:					/* 21140 non-MII xcvr. */
+			if (tp->msg_level & NETIF_MSG_LINK)
+				printk(KERN_DEBUG "%s: Using a 21140 non-MII transceiver"
+					   " with control setting %2.2x.\n",
+					   dev->name, p[1]);
+			dev->if_port = p[0];
+			if (startup)
+				outl(mtable->csr12dir | 0x100, ioaddr + CSR12);
+			outl(p[1], ioaddr + CSR12);
+			new_csr6 = 0x02000000 | ((p[2] & 0x71) << 18);
+			break;
+		case 2: case 4: {		/* 21142/21143 SIA or SYM blocks. */
+			u16 setup[5];
+			u32 csr13val, csr14val, csr15dir, csr15val;
+			for (i = 0; i < 5; i++)
+				setup[i] = get_u16(&p[i*2 + 1]);
+
+			dev->if_port = p[0] & MEDIA_MASK;
+			if (media_cap[dev->if_port] & MediaAlwaysFD)
+				tp->full_duplex = 1;
+
+			/* Play back the EEPROM reset sequence through CSR15. */
+			if (startup && mtable->has_reset) {
+				struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset-1];
+				unsigned char *rst = rleaf->leafdata;
+				if (tp->msg_level & NETIF_MSG_LINK)
+					printk(KERN_DEBUG "%s: Resetting the transceiver.\n",
+						   dev->name);
+				for (i = 0; i < rst[0]; i++)
+					outl(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
+			}
+			if (tp->msg_level & NETIF_MSG_LINK)
+				printk(KERN_DEBUG "%s: 21143 non-MII %s transceiver control "
+					   "%4.4x/%4.4x.\n",
+					   dev->name, medianame[dev->if_port], setup[0], setup[1]);
+			if (p[0] & 0x40) {	/* SIA (CSR13-15) setup values are provided. */
+				csr13val = setup[0];
+				csr14val = setup[1];
+				csr15dir = (setup[3]<<16) | setup[2];
+				csr15val = (setup[4]<<16) | setup[2];
+				outl(0, ioaddr + CSR13);
+				outl(csr14val, ioaddr + CSR14);
+				outl(csr15dir, ioaddr + CSR15);	/* Direction */
+				outl(csr15val, ioaddr + CSR15);	/* Data */
+				outl(csr13val, ioaddr + CSR13);
+			} else {
+				/* No explicit SIA values: use built-in defaults. */
+				csr13val = 1;
+				csr14val = 0x0003FFFF;
+				csr15dir = (setup[0]<<16) | 0x0008;
+				csr15val = (setup[1]<<16) | 0x0008;
+				if (dev->if_port <= 4)
+					csr14val = t21142_csr14[dev->if_port];
+				if (startup) {
+					outl(0, ioaddr + CSR13);
+					outl(csr14val, ioaddr + CSR14);
+				}
+				outl(csr15dir, ioaddr + CSR15);	/* Direction */
+				outl(csr15val, ioaddr + CSR15);	/* Data */
+				if (startup) outl(csr13val, ioaddr + CSR13);
+			}
+			if (tp->msg_level & NETIF_MSG_LINK)
+				printk(KERN_DEBUG "%s:  Setting CSR15 to %8.8x/%8.8x.\n",
+					   dev->name, csr15dir, csr15val);
+			if (mleaf->type == 4)
+				new_csr6 = 0x820A0000 | ((setup[2] & 0x71) << 18);
+			else
+				new_csr6 = 0x82420000;
+			break;
+		}
+		case 1: case 3: {		/* 21140 (1) or 21142 (3) MII blocks. */
+			int phy_num = p[0];
+			int init_length = p[1];
+			u16 *misc_info;
+
+			dev->if_port = 11;
+			new_csr6 = 0x020E0000;
+			if (mleaf->type == 3) {	/* 21142 */
+				/* Reset/init sequences are 16 bit values for CSR15. */
+				u16 *init_sequence = (u16*)(p+2);
+				u16 *reset_sequence = &((u16*)(p+3))[init_length];
+				int reset_length = p[2 + init_length*2];
+				misc_info = reset_sequence + reset_length;
+				if (startup)
+					for (i = 0; i < reset_length; i++)
+						outl(get_u16(&reset_sequence[i]) << 16, ioaddr + CSR15);
+				for (i = 0; i < init_length; i++)
+					outl(get_u16(&init_sequence[i]) << 16, ioaddr + CSR15);
+			} else {
+				/* Reset/init sequences are byte values for CSR12. */
+				u8 *init_sequence = p + 2;
+				u8 *reset_sequence = p + 3 + init_length;
+				int reset_length = p[2 + init_length];
+				misc_info = (u16*)(reset_sequence + reset_length);
+				if (startup) {
+					outl(mtable->csr12dir | 0x100, ioaddr + CSR12);
+					for (i = 0; i < reset_length; i++)
+						outl(reset_sequence[i], ioaddr + CSR12);
+				}
+				for (i = 0; i < init_length; i++)
+					outl(init_sequence[i], ioaddr + CSR12);
+			}
+			tp->advertising[phy_num] = get_u16(&misc_info[1]) | 1;
+			if (startup < 2) {
+				if (tp->mii_advertise == 0)
+					tp->mii_advertise = tp->advertising[phy_num];
+				if (tp->msg_level & NETIF_MSG_LINK)
+					printk(KERN_DEBUG "%s:  Advertising %4.4x on MII %d.\n",
+						   dev->name, tp->mii_advertise, tp->phys[phy_num]);
+				mdio_write(dev, tp->phys[phy_num], 4, tp->mii_advertise);
+			}
+			break;
+		}
+		default:
+			printk(KERN_DEBUG "%s:  Invalid media table selection %d.\n",
+					   dev->name, mleaf->type);
+			new_csr6 = 0x020E0000;
+		}
+		if (tp->msg_level & NETIF_MSG_LINK)
+			printk(KERN_DEBUG "%s: Using media type %s, CSR12 is %2.2x.\n",
+				   dev->name, medianame[dev->if_port],
+				   (int)inl(ioaddr + CSR12) & 0xff);
+	} else if (tp->chip_id == DC21041) {
+		int port = dev->if_port <= 4 ? dev->if_port : 0;
+		if (tp->msg_level & NETIF_MSG_LINK)
+			printk(KERN_DEBUG "%s: 21041 using media %s, CSR12 is %4.4x.\n",
+				   dev->name, medianame[port == 3 ? 12: port],
+				   (int)inl(ioaddr + CSR12));
+		outl(0x00000000, ioaddr + CSR13); /* Reset the serial interface */
+		outl(t21041_csr14[port], ioaddr + CSR14);
+		outl(t21041_csr15[port], ioaddr + CSR15);
+		outl(t21041_csr13[port], ioaddr + CSR13);
+		new_csr6 = 0x80020000;
+	} else if (tp->chip_id == LC82C168) {
+		if (startup && ! tp->medialock)
+			dev->if_port = tp->mii_cnt ? 11 : 0;
+		if (tp->msg_level & NETIF_MSG_LINK)
+			printk(KERN_DEBUG "%s: PNIC PHY status is %3.3x, media %s.\n",
+				   dev->name, (int)inl(ioaddr + 0xB8),
+				   medianame[dev->if_port]);
+		if (tp->mii_cnt) {
+			new_csr6 = 0x810C0000;
+			outl(0x0001, ioaddr + CSR15);
+			outl(0x0201B07A, ioaddr + 0xB8);
+		} else if (startup) {
+			/* Start with 10mbps to do autonegotiation. */
+			outl(0x32, ioaddr + CSR12);
+			new_csr6 = 0x00420000;
+			outl(0x0001B078, ioaddr + 0xB8);
+			outl(0x0201B078, ioaddr + 0xB8);
+		} else if (dev->if_port == 3  ||  dev->if_port == 5) {
+			outl(0x33, ioaddr + CSR12);
+			new_csr6 = 0x01860000;
+			/* Trigger autonegotiation. */
+			outl(startup ? 0x0201F868 : 0x0001F868, ioaddr + 0xB8);
+		} else {
+			outl(0x32, ioaddr + CSR12);
+			new_csr6 = 0x00420000;
+			outl(0x1F078, ioaddr + 0xB8);
+		}
+	} else if (tp->chip_id == DC21040) {					/* 21040 */
+		/* Turn on the xcvr interface. */
+		int csr12 = inl(ioaddr + CSR12);
+		if (tp->msg_level & NETIF_MSG_LINK)
+			printk(KERN_DEBUG "%s: 21040 media type is %s, CSR12 is %2.2x.\n",
+				   dev->name, medianame[dev->if_port], csr12);
+		if (media_cap[dev->if_port] & MediaAlwaysFD)
+			tp->full_duplex = 1;
+		new_csr6 = 0x20000;
+		/* Set the full duplux match frame. */
+		outl(FULL_DUPLEX_MAGIC, ioaddr + CSR11);
+		outl(0x00000000, ioaddr + CSR13); /* Reset the serial interface */
+		if (t21040_csr13[dev->if_port] & 8) {
+			outl(0x0705, ioaddr + CSR14);
+			outl(0x0006, ioaddr + CSR15);
+		} else {
+			outl(0xffff, ioaddr + CSR14);
+			outl(0x0000, ioaddr + CSR15);
+		}
+		outl(0x8f01 | t21040_csr13[dev->if_port], ioaddr + CSR13);
+	} else {					/* Unknown chip type with no media table. */
+		if (tp->default_port == 0)
+			dev->if_port = tp->mii_cnt ? 11 : 3;
+		if (media_cap[dev->if_port] & MediaIsMII) {
+			new_csr6 = 0x020E0000;
+		} else if (media_cap[dev->if_port] & MediaIsFx) {
+			new_csr6 = 0x02860000;
+		} else
+			new_csr6 = 0x038E0000;
+		if (tp->msg_level & NETIF_MSG_LINK)
+			printk(KERN_DEBUG "%s: No media description table, assuming "
+				   "%s transceiver, CSR12 %2.2x.\n",
+				   dev->name, medianame[dev->if_port],
+				   (int)inl(ioaddr + CSR12));
+	}
+
+	/* Merge the new mode bits into CSR6, preserving the low control bits. */
+	tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) |
+		(tp->full_duplex ? FullDuplex : 0);
+	return;
+}
+
+/*
+  Check the MII negotiated duplex, and change the CSR6 setting if
+  required.
+  Return 0 if everything is OK.
+  Return < 0 if the transceiver is missing or has no link beat.
+  */
+static int check_duplex(struct net_device *dev)
+{
+	long ioaddr = dev->base_addr;
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	int mii_reg1, mii_reg5, negotiated, duplex;
+
+	/* A user-forced duplex setting overrides autonegotiation. */
+	if (tp->full_duplex_lock)
+		return 0;
+	/* MII register 5 is the link partner ability (LPA) register. */
+	mii_reg5 = mdio_read(dev, tp->phys[0], 5);
+	negotiated = mii_reg5 & tp->mii_advertise;
+
+	if (tp->msg_level & NETIF_MSG_TIMER)
+		printk(KERN_INFO "%s: MII link partner %4.4x, negotiated %4.4x.\n",
+			   dev->name, mii_reg5, negotiated);
+	if (mii_reg5 == 0xffff)
+		return -2;		/* Transceiver missing / not responding. */
+	if ((mii_reg5 & 0x4000) == 0  &&			/* No negotiation. */
+		((mii_reg1 = mdio_read(dev, tp->phys[0], 1)) & 0x0004) == 0) {
+		/* Status register 1 is read twice; presumably because the link
+		   bit latches, so a single stale read could report a dead link.
+		   NOTE(review): confirm against the MII/BMSR specification. */
+		int new_reg1 = mdio_read(dev, tp->phys[0], 1);
+		if ((new_reg1 & 0x0004) == 0) {
+			if (tp->msg_level & NETIF_MSG_TIMER)
+				printk(KERN_INFO "%s: No link beat on the MII interface,"
+					   " status %4.4x.\n", dev->name, new_reg1);
+			return -1;
+		}
+	}
+	duplex = ((negotiated & 0x0300) == 0x0100
+			  || (negotiated & 0x00C0) == 0x0040);
+	/* 100baseTx-FD  or  10T-FD, but not 100-HD */
+	if (tp->full_duplex != duplex) {
+		tp->full_duplex = duplex;
+		if (negotiated & 0x0380)	/* 100mbps. */
+			tp->csr6 &= ~0x00400000;
+		if (tp->full_duplex) tp->csr6 |= FullDuplex;
+		else				 tp->csr6 &= ~FullDuplex;
+		/* Restart Rx, then Tx, with the updated duplex setting. */
+		outl(tp->csr6 | RxOn, ioaddr + CSR6);
+		outl(tp->csr6 | TxOn | RxOn, ioaddr + CSR6);
+		if (tp->msg_level & NETIF_MSG_LINK)
+			printk(KERN_INFO "%s: Setting %s-duplex based on MII "
+				   "#%d link partner capability of %4.4x.\n",
+				   dev->name, tp->full_duplex ? "full" : "half",
+				   tp->phys[0], mii_reg5);
+		return 1;		/* Duplex setting changed. */
+	}
+	return 0;
+}
+
+/* Periodic media-monitor timer: check link beat for the current media
+   and, when autosensing is allowed, switch to an alternate media type.
+   Dispatches on chip type; reschedules itself with a chip- and
+   state-dependent interval. */
+static void tulip_timer(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	u32 csr12 = inl(ioaddr + CSR12);
+	int next_tick = 2*HZ;
+
+	if (tp->msg_level & NETIF_MSG_TIMER)
+		printk(KERN_DEBUG "%s: Media selection tick, %s, status %8.8x mode"
+			   " %8.8x SIA %8.8x %8.8x %8.8x %8.8x.\n",
+			   dev->name, medianame[dev->if_port], (int)inl(ioaddr + CSR5),
+			   (int)inl(ioaddr + CSR6), csr12, (int)inl(ioaddr + CSR13),
+			   (int)inl(ioaddr + CSR14), (int)inl(ioaddr + CSR15));
+
+	switch (tp->chip_id) {
+	case DC21040:
+		if (!tp->medialock  &&  (csr12 & 0x0002)) { /* Network error */
+			if (tp->msg_level & NETIF_MSG_TIMER)
+				printk(KERN_INFO "%s: No link beat found.\n",
+					   dev->name);
+			/* Toggle between port 0 (10baseT) and port 2 (AUI). */
+			dev->if_port = (dev->if_port == 2 ? 0 : 2);
+			select_media(dev, 0);
+			dev->trans_start = jiffies;
+		}
+		break;
+	case DC21041:
+		if (tp->msg_level & NETIF_MSG_TIMER)
+			printk(KERN_DEBUG "%s: 21041 media tick  CSR12 %8.8x.\n",
+				   dev->name, csr12);
+		if (tp->medialock) break;
+		switch (dev->if_port) {
+		case 0: case 3: case 4:
+		  if (csr12 & 0x0004) { /*LnkFail */
+			/* 10baseT is dead.  Check for activity on alternate port. */
+			tp->mediasense = 1;
+			if (csr12 & 0x0200)
+				dev->if_port = 2;
+			else
+				dev->if_port = 1;
+			if (tp->msg_level & NETIF_MSG_LINK)
+				printk(KERN_INFO "%s: No 21041 10baseT link beat, Media "
+					   "switched to %s.\n",
+					   dev->name, medianame[dev->if_port]);
+			outl(0, ioaddr + CSR13); /* Reset */
+			outl(t21041_csr14[dev->if_port], ioaddr + CSR14);
+			outl(t21041_csr15[dev->if_port], ioaddr + CSR15);
+			outl(t21041_csr13[dev->if_port], ioaddr + CSR13);
+			next_tick = 10*HZ;			/* 2.4 sec. */
+		  } else
+			next_tick = 30*HZ;
+		  break;
+		case 1:					/* 10base2 */
+		case 2:					/* AUI */
+			if (csr12 & 0x0100) {
+				/* Activity seen on this port; keep it. */
+				next_tick = (30*HZ);			/* 30 sec. */
+				tp->mediasense = 0;
+			} else if ((csr12 & 0x0004) == 0) {
+				if (tp->msg_level & NETIF_MSG_LINK)
+					printk(KERN_INFO "%s: 21041 media switched to 10baseT.\n",
+						   dev->name);
+				dev->if_port = 0;
+				select_media(dev, 0);
+				next_tick = (24*HZ)/10;				/* 2.4 sec. */
+			} else if (tp->mediasense || (csr12 & 0x0002)) {
+				dev->if_port = 3 - dev->if_port; /* Swap ports. */
+				select_media(dev, 0);
+				next_tick = 20*HZ;
+			} else {
+				next_tick = 20*HZ;
+			}
+			break;
+		}
+		break;
+	case DC21140:  case DC21142: case MX98713: case COMPEX9881: default: {
+		struct medialeaf *mleaf;
+		unsigned char *p;
+		if (tp->mtable == NULL) {	/* No EEPROM info, use generic code. */
+			/* Not much that can be done.
+			   Assume this a generic MII or SYM transceiver. */
+			next_tick = 60*HZ;
+			if (tp->msg_level & NETIF_MSG_TIMER)
+				printk(KERN_DEBUG "%s: network media monitor CSR6 %8.8x "
+					   "CSR12 0x%2.2x.\n",
+					   dev->name, (int)inl(ioaddr + CSR6), csr12 & 0xff);
+			break;
+		}
+		mleaf = &tp->mtable->mleaf[tp->cur_index];
+		p = mleaf->leafdata;
+		switch (mleaf->type) {
+		case 0: case 4: {
+			/* Type 0 serial or 4 SYM transceiver.  Check the link beat bit. */
+			int offset = mleaf->type == 4 ? 5 : 2;
+			s8 bitnum = p[offset];
+			if (p[offset+1] & 0x80) {
+				/* No media-sense bit available for this media. */
+				if (tp->msg_level & NETIF_MSG_TIMER)
+					printk(KERN_DEBUG"%s: Transceiver monitor tick "
+						   "CSR12=%#2.2x, no media sense.\n",
+						   dev->name, csr12);
+				if (mleaf->type == 4) {
+					if (mleaf->media == 3 && (csr12 & 0x02))
+						goto select_next_media;
+				}
+				break;
+			}
+			if (tp->msg_level & NETIF_MSG_TIMER)
+				printk(KERN_DEBUG "%s: Transceiver monitor tick: CSR12=%#2.2x"
+					   " bit %d is %d, expecting %d.\n",
+					   dev->name, csr12, (bitnum >> 1) & 7,
+					   (csr12 & (1 << ((bitnum >> 1) & 7))) != 0,
+					   (bitnum >= 0));
+			/* Check that the specified bit has the proper value. */
+			if ((bitnum < 0) !=
+				((csr12 & (1 << ((bitnum >> 1) & 7))) != 0)) {
+				if (tp->msg_level & NETIF_MSG_LINK)
+					printk(KERN_DEBUG "%s: Link beat detected for %s.\n",
+						   dev->name, medianame[mleaf->media & MEDIA_MASK]);
+				if ((p[2] & 0x61) == 0x01)	/* Bogus Znyx board. */
+					goto actually_mii;
+				break;
+			}
+			if (tp->medialock)
+				break;
+	  select_next_media:
+			if (--tp->cur_index < 0) {
+				/* We start again, but should instead look for default. */
+				tp->cur_index = tp->mtable->leafcount - 1;
+			}
+			dev->if_port = tp->mtable->mleaf[tp->cur_index].media;
+			if (media_cap[dev->if_port] & MediaIsFD)
+				goto select_next_media; /* Skip FD entries. */
+			if (tp->msg_level & NETIF_MSG_LINK)
+				printk(KERN_DEBUG "%s: No link beat on media %s,"
+					   " trying transceiver type %s.\n",
+					   dev->name, medianame[mleaf->media & MEDIA_MASK],
+					   medianame[tp->mtable->mleaf[tp->cur_index].media]);
+			select_media(dev, 0);
+			/* Restart the transmit process. */
+			outl(tp->csr6 | RxOn, ioaddr + CSR6);
+			outl(tp->csr6 | TxOn | RxOn, ioaddr + CSR6);
+			next_tick = (24*HZ)/10;
+			break;
+		}
+		case 1:  case 3:		/* 21140, 21142 MII */
+		actually_mii:
+			check_duplex(dev);
+			next_tick = 60*HZ;
+			break;
+		case 2:					/* 21142 serial block has no link beat. */
+		default:
+			break;
+		}
+	}
+	break;
+	}
+	/* Re-arm ourselves with the interval chosen above. */
+	tp->timer.expires = jiffies + next_tick;
+	add_timer(&tp->timer);
+}
+
+/* Handle internal NWay transceivers uniquely.
+   These exist on the 21041, 21143 (in SYM mode) and the PNIC2.
+   */
+/* Media timer for chips using the internal NWay autonegotiation engine:
+   leave a negotiated or locked session alone, fall back from a dead
+   100baseTx link to a new negotiation, and probe 10/100 ports when
+   negotiation failed outright.  Also watches for a hung transmitter. */
+static void nway_timer(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int csr12 = inl(ioaddr + CSR12);
+	int next_tick = 60*HZ;
+	int new_csr6 = 0;
+
+	if (tp->msg_level & NETIF_MSG_TIMER)
+		printk(KERN_INFO"%s: N-Way autonegotiation status %8.8x, %s.\n",
+			   dev->name, csr12, medianame[dev->if_port]);
+	if (media_cap[dev->if_port] & MediaIsMII) {
+		check_duplex(dev);
+	} else if (tp->nwayset) {
+		/* Do not screw up a negotiated session! */
+		if (tp->msg_level & NETIF_MSG_TIMER)
+			printk(KERN_INFO"%s: Using NWay-set %s media, csr12 %8.8x.\n",
+				   dev->name, medianame[dev->if_port], csr12);
+	} else if (tp->medialock) {
+			;	/* Media locked by the user: do nothing. */
+	} else if (dev->if_port == 3) {
+		if (csr12 & 2) {	/* No 100mbps link beat, revert to 10mbps. */
+			if (tp->msg_level & NETIF_MSG_LINK)
+				printk(KERN_INFO"%s: No 21143 100baseTx link beat, %8.8x, "
+					   "trying NWay.\n", dev->name, csr12);
+			nway_start(dev);
+			next_tick = 3*HZ;
+		}
+	} else if ((csr12 & 0x7000) != 0x5000) {
+		/* Negotiation failed.  Search media types. */
+		if (tp->msg_level & NETIF_MSG_LINK)
+			printk(KERN_INFO"%s: 21143 negotiation failed, status %8.8x.\n",
+				   dev->name, csr12);
+		if (!(csr12 & 4)) {		/* 10mbps link beat good. */
+			new_csr6 = 0x82420000;
+			dev->if_port = 0;
+			outl(0, ioaddr + CSR13);
+			outl(0x0003FFFF, ioaddr + CSR14);
+			outw(t21142_csr15[dev->if_port], ioaddr + CSR15);
+			outl(t21142_csr13[dev->if_port], ioaddr + CSR13);
+		} else {
+			/* Select 100mbps port to check for link beat. */
+			new_csr6 = 0x83860000;
+			dev->if_port = 3;
+			outl(0, ioaddr + CSR13);
+			outl(0x0003FF7F, ioaddr + CSR14);
+			outw(8, ioaddr + CSR15);
+			outl(1, ioaddr + CSR13);
+		}
+		if (tp->msg_level & NETIF_MSG_LINK)
+			printk(KERN_INFO"%s: Testing new 21143 media %s.\n",
+				   dev->name, medianame[dev->if_port]);
+		/* Only rewrite CSR6/CSR12 if the mode bits actually change. */
+		if (new_csr6 != (tp->csr6 & ~0x20D7)) {
+			tp->csr6 &= 0x20D7;
+			tp->csr6 |= new_csr6;
+			outl(0x0301, ioaddr + CSR12);
+			outl(tp->csr6 | RxOn, ioaddr + CSR6);
+			outl(tp->csr6 | TxOn | RxOn, ioaddr + CSR6);
+		}
+		next_tick = 3*HZ;
+	}
+	/* Watchdog: if Tx descriptors are outstanding past TX_TIMEOUT,
+	   assume the transmitter is hung and reset it. */
+	if (tp->cur_tx - tp->dirty_tx > 0  &&
+		jiffies - dev->trans_start > TX_TIMEOUT) {
+		printk(KERN_WARNING "%s: Tx hung, %d vs. %d.\n",
+			   dev->name, tp->cur_tx, tp->dirty_tx);
+		tulip_tx_timeout(dev);
+	}
+
+	tp->timer.expires = jiffies + next_tick;
+	add_timer(&tp->timer);
+}
+
+/* (Re)start the internal NWay autonegotiation engine: program the SIA
+   CSR13/14/15 registers, load CSR6, and trigger negotiation via CSR12.
+   For the PNIC2 only tp->csr6 is computed here; NOTE(review): the early
+   return skips all the SIA writes -- presumably the caller performs the
+   remaining chip setup for that part. */
+static void nway_start(struct net_device *dev)
+{
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	/* Build the CSR14 value from our advertised abilities; the 0x0780
+	   and 0x0020 groups of sym_advertise are relocated into the chip's
+	   register layout and 0xffbf supplies the fixed control bits. */
+	int csr14 = ((tp->sym_advertise & 0x0780) << 9)  |
+		((tp->sym_advertise&0x0020)<<1) | 0xffbf;
+
+	dev->if_port = 0;
+	tp->nway = tp->mediasense = 1;
+	tp->nwayset = tp->lpar = 0;		/* Forget any previous partner info. */
+	if (tp->chip_id == PNIC2) {
+		tp->csr6 = 0x01000000 | (tp->sym_advertise & 0x0040 ? FullDuplex : 0);
+		return;
+	}
+	if (tp->msg_level & NETIF_MSG_LINK)
+		printk(KERN_DEBUG "%s: Restarting internal NWay autonegotiation, "
+			   "%8.8x.\n", dev->name, csr14);
+	outl(0x0001, ioaddr + CSR13);
+	outl(csr14, ioaddr + CSR14);
+	tp->csr6 = 0x82420000 | (tp->sym_advertise & 0x0040 ? FullDuplex : 0)
+		| (tp->csr6 & 0x20ff);
+	outl(tp->csr6, ioaddr + CSR6);
+	if (tp->mtable  &&  tp->mtable->csr15dir) {
+		outl(tp->mtable->csr15dir, ioaddr + CSR15);
+		outl(tp->mtable->csr15val, ioaddr + CSR15);
+	} else if (tp->chip_id != PNIC2)	/* NOTE(review): always true here -- PNIC2 returned above. */
+		outw(0x0008, ioaddr + CSR15);
+	if (tp->chip_id == DC21041)			/* Trigger NWAY. */
+		outl(0xEF01, ioaddr + CSR12);
+	else
+		outl(0x1301, ioaddr + CSR12);
+}
+
+/* Link-status interrupt handler for the NWay-capable chips.  Decodes
+   the negotiation result from CSR12 (partner abilities in the high
+   half), selects the matching media, and restarts negotiation when the
+   link drops. */
+static void nway_lnk_change(struct net_device *dev, int csr5)
+{
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int csr12 = inl(ioaddr + CSR12);
+
+	if (tp->chip_id == PNIC2) {
+		if (tp->msg_level & NETIF_MSG_LINK)
+			printk(KERN_INFO"%s: PNIC-2 link status changed, CSR5/12/14 %8.8x"
+				   " %8.8x, %8.8x.\n",
+				   dev->name, csr12, csr5, (int)inl(ioaddr + CSR14));
+		dev->if_port = 5;
+		tp->lpar = csr12 >> 16;
+		tp->nwayset = 1;
+		tp->csr6 = 0x01000000 | (tp->csr6 & 0xffff);
+		outl(tp->csr6, ioaddr + CSR6);
+		return;
+	}
+	if (tp->msg_level & NETIF_MSG_LINK)
+		printk(KERN_INFO"%s: 21143 link status interrupt %8.8x, CSR5 %x, "
+			   "%8.8x.\n", dev->name, csr12, csr5, (int)inl(ioaddr + CSR14));
+
+	/* If NWay finished and we have a negotiated partner capability. */
+	if (tp->nway  &&  !tp->nwayset  &&  (csr12 & 0x7000) == 0x5000) {
+		int setup_done = 0;
+		/* Abilities both ends advertise.  Bit meanings appear to follow
+		   the MII layout: 0x0100 100-FD, 0x0080 100, 0x0040 10-FD,
+		   0x0020 10baseT -- verify against medianame[]/media_cap[]. */
+		int negotiated = tp->sym_advertise & (csr12 >> 16);
+		tp->lpar = csr12 >> 16;
+		tp->nwayset = 1;
+		if (negotiated & 0x0100)		dev->if_port = 5;
+		else if (negotiated & 0x0080)	dev->if_port = 3;
+		else if (negotiated & 0x0040)	dev->if_port = 4;
+		else if (negotiated & 0x0020)	dev->if_port = 0;
+		else {
+			/* Nothing in common: fall back on link beat alone. */
+			tp->nwayset = 0;
+			if ((csr12 & 2) == 0  &&  (tp->sym_advertise & 0x0180))
+				dev->if_port = 3;
+		}
+		tp->full_duplex = (media_cap[dev->if_port] & MediaAlwaysFD) ? 1:0;
+
+		if (tp->msg_level & NETIF_MSG_LINK) {
+			if (tp->nwayset)
+				printk(KERN_INFO "%s: Switching to %s based on link "
+					   "negotiation %4.4x & %4.4x = %4.4x.\n",
+					   dev->name, medianame[dev->if_port], tp->sym_advertise,
+					   tp->lpar, negotiated);
+			else
+				printk(KERN_INFO "%s: Autonegotiation failed, using %s,"
+					   " link beat status %4.4x.\n",
+					   dev->name, medianame[dev->if_port], csr12);
+		}
+
+		/* Prefer the EEPROM media-table entry for this port if present. */
+		if (tp->mtable) {
+			int i;
+			for (i = 0; i < tp->mtable->leafcount; i++)
+				if (tp->mtable->mleaf[i].media == dev->if_port) {
+					tp->cur_index = i;
+					select_media(dev, 0);
+					setup_done = 1;
+					break;
+				}
+		}
+		if ( ! setup_done) {
+			/* No table entry: program CSR6 directly (odd ports are the
+			   100mbps media) and reactivate the SIA. */
+			tp->csr6 = (dev->if_port & 1 ? 0x838E0000 : 0x82420000)
+				| (tp->csr6 & 0x20ff);
+			if (tp->full_duplex)
+				tp->csr6 |= FullDuplex;
+			outl(1, ioaddr + CSR13);
+		}
+#if 0							/* Restart should not be needed. */
+		outl(tp->csr6 | 0x0000, ioaddr + CSR6);
+		if (tp->msg_level & NETIF_MSG_LINK)
+			printk(KERN_DEBUG "%s:  Restarting Tx and Rx, CSR5 is %8.8x.\n",
+				   dev->name, inl(ioaddr + CSR5));
+#endif
+		outl(tp->csr6 | TxOn | RxOn, ioaddr + CSR6);
+		if (tp->msg_level & NETIF_MSG_LINK)
+			printk(KERN_DEBUG "%s:  Setting CSR6 %8.8x/%x CSR12 %8.8x.\n",
+				   dev->name, tp->csr6, (int)inl(ioaddr + CSR6),
+				   (int)inl(ioaddr + CSR12));
+	} else if ((tp->nwayset  &&  (csr5 & 0x08000000)
+				&& (dev->if_port == 3  ||  dev->if_port == 5)
+				&& (csr12 & 2) == 2) ||
+			   (tp->nway && (csr5 & (TPLnkFail)))) {
+		/* Link blew? Maybe restart NWay. */
+		del_timer(&tp->timer);
+		nway_start(dev);
+		tp->timer.expires = jiffies + 3*HZ;
+		add_timer(&tp->timer);
+	} else if (dev->if_port == 3  ||  dev->if_port == 5) {
+		if (tp->msg_level & NETIF_MSG_LINK)	/* TIMER? */
+			printk(KERN_INFO"%s: 21143 %s link beat %s.\n",
+				   dev->name, medianame[dev->if_port],
+				   (csr12 & 2) ? "failed" : "good");
+		if ((csr12 & 2)  &&  ! tp->medialock) {
+			/* Beat lost on a 100mbps port and not user-locked: renegotiate. */
+			del_timer(&tp->timer);
+			nway_start(dev);
+			tp->timer.expires = jiffies + 3*HZ;
+			add_timer(&tp->timer);
+		} else if (dev->if_port == 5)
+			outl(inl(ioaddr + CSR14) & ~0x080, ioaddr + CSR14);
+	} else if (dev->if_port == 0  ||  dev->if_port == 4) {
+		if ((csr12 & 4) == 0)
+			printk(KERN_INFO"%s: 21143 10baseT link beat good.\n",
+				   dev->name);
+	} else if (!(csr12 & 4)) {		/* 10mbps link beat good. */
+		if (tp->msg_level & NETIF_MSG_LINK)
+			printk(KERN_INFO"%s: 21143 10mbps sensed media.\n",
+				   dev->name);
+		dev->if_port = 0;
+	} else if (tp->nwayset) {
+		if (tp->msg_level & NETIF_MSG_LINK)
+			printk(KERN_INFO"%s: 21143 using NWay-set %s, csr6 %8.8x.\n",
+				   dev->name, medianame[dev->if_port], tp->csr6);
+	} else {		/* 100mbps link beat good. */
+		if (tp->msg_level & NETIF_MSG_LINK)
+			printk(KERN_INFO"%s: 21143 100baseTx sensed media.\n",
+				   dev->name);
+		dev->if_port = 3;
+		tp->csr6 = 0x838E0000 | (tp->csr6 & 0x20ff);
+		outl(0x0003FF7F, ioaddr + CSR14);
+		outl(0x0301, ioaddr + CSR12);
+		outl(tp->csr6 | RxOn, ioaddr + CSR6);
+		outl(tp->csr6 | RxOn | TxOn, ioaddr + CSR6);
+	}
+}
+
+/* Media timer for MXIC chips: optionally log the negotiation status
+   from CSR12, then re-arm for another check one minute later. */
+static void mxic_timer(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+
+	if (tp->msg_level & NETIF_MSG_TIMER) {
+		printk(KERN_INFO"%s: MXIC negotiation status %8.8x.\n", dev->name,
+			   (int)inl(dev->base_addr + CSR12));
+	}
+	tp->timer.expires = jiffies + 60*HZ;
+	add_timer(&tp->timer);
+}
+
+/* Read the PNIC's autonegotiation result from the PHY register at 0xB8
+   and, if any ability bit was negotiated, pick the matching media,
+   reprogram CSR12/0xB8, and restart Rx/Tx with the new CSR6 mode. */
+static void pnic_do_nway(struct net_device *dev)
+{
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	u32 phy_reg = inl(ioaddr + 0xB8);
+	u32 new_csr6 = tp->csr6 & ~0x40C40200;
+
+	if (phy_reg & 0x78000000) { /* Ignore baseT4 */
+		if (phy_reg & 0x20000000)		dev->if_port = 5;
+		else if (phy_reg & 0x40000000)	dev->if_port = 3;
+		else if (phy_reg & 0x10000000)	dev->if_port = 4;
+		else if (phy_reg & 0x08000000)	dev->if_port = 0;
+		tp->nwayset = 1;
+		/* Odd if_port values appear to be the 100mbps media -- verify
+		   against medianame[]. */
+		new_csr6 = (dev->if_port & 1) ? 0x01860000 : 0x00420000;
+		outl(0x32 | (dev->if_port & 1), ioaddr + CSR12);
+		if (dev->if_port & 1)
+			outl(0x1F868, ioaddr + 0xB8);
+		if (phy_reg & 0x30000000) {
+			/* Either full-duplex ability bit was negotiated. */
+			tp->full_duplex = 1;
+			new_csr6 |= FullDuplex;
+		}
+		if (tp->msg_level & NETIF_MSG_LINK)
+			printk(KERN_DEBUG "%s: PNIC autonegotiated status %8.8x, %s.\n",
+				   dev->name, phy_reg, medianame[dev->if_port]);
+		if (tp->csr6 != new_csr6) {
+			tp->csr6 = new_csr6;
+			outl(tp->csr6 | RxOn, ioaddr + CSR6);	/* Restart Tx */
+			outl(tp->csr6 | TxOn | RxOn, ioaddr + CSR6);
+			dev->trans_start = jiffies;
+		}
+	}
+}
+
+/* PNIC link-status interrupt: on link fail re-enable autonegotiation
+   (rate-limited to once a second once something was negotiated), on
+   link pass complete the negotiation.  The CSR7 writes swap which of
+   the two link events will interrupt next. */
+static void pnic_lnk_change(struct net_device *dev, int csr5)
+{
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int phy_reg = inl(ioaddr + 0xB8);
+
+	if (tp->msg_level & NETIF_MSG_LINK)
+		printk(KERN_DEBUG "%s: PNIC link changed state %8.8x, CSR5 %8.8x.\n",
+			   dev->name, phy_reg, csr5);
+	if (inl(ioaddr + CSR5) & TPLnkFail) {
+		outl((inl(ioaddr + CSR7) & ~TPLnkFail) | TPLnkPass, ioaddr + CSR7);
+		if (! tp->nwayset  ||  jiffies - dev->trans_start > 1*HZ) {
+			tp->csr6 = 0x00420000 | (tp->csr6 & 0x0000fdff);
+			outl(tp->csr6, ioaddr + CSR6);
+			outl(0x30, ioaddr + CSR12);
+			outl(0x0201F078, ioaddr + 0xB8); /* Turn on autonegotiation. */
+			dev->trans_start = jiffies;
+		}
+	} else if (inl(ioaddr + CSR5) & TPLnkPass) {
+		pnic_do_nway(dev);
+		outl((inl(ioaddr + CSR7) & ~TPLnkPass) | TPLnkFail, ioaddr + CSR7);
+	}
+}
+/* Periodic media monitor for the PNIC: with an MII transceiver just
+   track the duplex; otherwise watch the built-in PHY register and
+   alternate between 10baseT and 100baseTx while hunting for link beat.
+   Always re-arms itself. */
+static void pnic_timer(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int next_tick = 60*HZ;
+
+	if (media_cap[dev->if_port] & MediaIsMII) {
+		if (check_duplex(dev) > 0)
+			next_tick = 3*HZ;
+	} else {
+		int csr12 = inl(ioaddr + CSR12);
+		int new_csr6 = tp->csr6 & ~0x40C40200;
+		int phy_reg = inl(ioaddr + 0xB8);
+		int csr5 = inl(ioaddr + CSR5);
+
+		if (tp->msg_level & NETIF_MSG_TIMER)
+			printk(KERN_DEBUG "%s: PNIC timer PHY status %8.8x, %s "
+				   "CSR5 %8.8x.\n",
+				   dev->name, phy_reg, medianame[dev->if_port], csr5);
+		if (phy_reg & 0x04000000) {	/* Remote link fault */
+			/* Restart autonegotiation and poll again quickly. */
+			outl(0x0201F078, ioaddr + 0xB8);
+			next_tick = 1*HZ;
+			tp->nwayset = 0;
+		} else if (phy_reg & 0x78000000) { /* Ignore baseT4 */
+			pnic_do_nway(dev);
+			next_tick = 60*HZ;
+		} else if (csr5 & TPLnkFail) { /* 100baseTx link beat */
+			if (tp->msg_level & NETIF_MSG_LINK)
+				printk(KERN_DEBUG "%s: %s link beat failed, CSR12 %4.4x, "
+					   "CSR5 %8.8x, PHY %3.3x.\n",
+					   dev->name, medianame[dev->if_port], csr12,
+					   (int)inl(ioaddr + CSR5), (int)inl(ioaddr + 0xB8));
+			next_tick = 3*HZ;
+			if (tp->medialock) {
+				/* User-forced media: never switch, just keep polling. */
+			} else if (tp->nwayset  &&  (dev->if_port & 1)) {
+				next_tick = 1*HZ;
+			} else if (dev->if_port == 0) {
+				/* 10baseT failed: try the 100baseTx port. */
+				dev->if_port = 3;
+				outl(0x33, ioaddr + CSR12);
+				new_csr6 = 0x01860000;
+				outl(0x1F868, ioaddr + 0xB8);
+			} else {
+				/* 100baseTx failed: fall back to 10baseT. */
+				dev->if_port = 0;
+				outl(0x32, ioaddr + CSR12);
+				new_csr6 = 0x00420000;
+				outl(0x1F078, ioaddr + 0xB8);
+			}
+			if (tp->csr6 != new_csr6) {
+				tp->csr6 = new_csr6;
+				outl(tp->csr6 | RxOn, ioaddr + CSR6);	/* Restart Tx */
+				outl(tp->csr6 | RxOn | TxOn, ioaddr + CSR6);
+				dev->trans_start = jiffies;
+				if (tp->msg_level & NETIF_MSG_LINK)
+					printk(KERN_INFO "%s: Changing PNIC configuration to %s "
+						   "%s-duplex, CSR6 %8.8x.\n",
+						   dev->name, medianame[dev->if_port],
+						   tp->full_duplex ? "full" : "half", new_csr6);
+			}
+		}
+	}
+	tp->timer.expires = jiffies + next_tick;
+	add_timer(&tp->timer);
+}
+
+/* Media timer for Comet chips: the transceiver is a true MII PHY, so
+   just report its status/partner registers and let check_duplex()
+   track the duplex before re-arming. */
+static void comet_timer(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	int phy = tp->phys[0];
+
+	if (tp->msg_level & NETIF_MSG_TIMER)
+		printk(KERN_DEBUG "%s: Comet link status %4.4x partner capability "
+			   "%4.4x.\n",
+			   dev->name, mdio_read(dev, phy, 1), mdio_read(dev, phy, 5));
+	check_duplex(dev);
+	tp->timer.expires = jiffies + 60*HZ;
+	add_timer(&tp->timer);
+}
+
+/* Transmit-timeout handler: diagnose the stalled transmitter per chip
+   type, optionally switch media or restart autonegotiation, then
+   restart the Rx/Tx engines and issue a transmit poll demand.  Called
+   from the generic watchdog path and from the media-monitor timers.
+   Fix: the debug dump loop tested buf[j] before the bounds check,
+   reading one byte past the buffer when no 0xEE byte occurred; the
+   operands are now ordered so the bound short-circuits the read. */
+static void tulip_tx_timeout(struct net_device *dev)
+{
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+
+	if (media_cap[dev->if_port] & MediaIsMII) {
+		/* Do nothing -- the media monitor should handle this. */
+		int mii_bmsr = mdio_read(dev, tp->phys[0], 1);
+		if (tp->msg_level & NETIF_MSG_LINK)
+			printk(KERN_WARNING "%s: Transmit timeout using MII device,"
+				   " status %4.4x.\n",
+				   dev->name, mii_bmsr);
+		if ( ! (mii_bmsr & 0x0004)) {		/* No link beat present */
+			dev->trans_start = jiffies;
+			netif_link_down(dev);
+			return;
+		}
+	} else switch (tp->chip_id) {
+	case DC21040:
+		/* Toggle between the AUI (2) and 10baseT (0) ports. */
+		if ( !tp->medialock  &&  inl(ioaddr + CSR12) & 0x0002) {
+			dev->if_port = (dev->if_port == 2 ? 0 : 2);
+			printk(KERN_INFO "%s: transmit timed out, switching to "
+				   "%s.\n",
+				   dev->name, medianame[dev->if_port]);
+			select_media(dev, 0);
+		}
+		dev->trans_start = jiffies;
+		return;					/* Note: not break! */
+	case DC21041: {
+		int csr12 = inl(ioaddr + CSR12);
+
+		printk(KERN_WARNING "%s: 21041 transmit timed out, status %8.8x, "
+			   "CSR12 %8.8x, CSR13 %8.8x, CSR14 %8.8x, resetting...\n",
+			   dev->name, (int)inl(ioaddr + CSR5), csr12,
+			   (int)inl(ioaddr + CSR13), (int)inl(ioaddr + CSR14));
+		tp->mediasense = 1;
+		if ( ! tp->medialock) {
+			/* Cycle through ports 1 -> 2 -> 0 based on link status. */
+			if (dev->if_port == 1 || dev->if_port == 2)
+				dev->if_port = (csr12 & 0x0004) ? 2 - dev->if_port : 0;
+			else
+				dev->if_port = 1;
+			select_media(dev, 0);
+		}
+		break;
+	}
+	case DC21142:
+		if (tp->nwayset) {
+			printk(KERN_WARNING "%s: Transmit timed out, status %8.8x, "
+				   "SIA %8.8x %8.8x %8.8x %8.8x, restarting NWay .\n",
+				   dev->name, (int)inl(ioaddr + CSR5),
+				   (int)inl(ioaddr + CSR12), (int)inl(ioaddr + CSR13),
+				   (int)inl(ioaddr + CSR14), (int)inl(ioaddr + CSR15));
+			nway_start(dev);
+			break;
+		}
+		/* Fall through. */
+	case DC21140: case MX98713: case COMPEX9881:
+		printk(KERN_WARNING "%s: %s transmit timed out, status %8.8x, "
+			   "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n",
+			   dev->name, tulip_tbl[tp->chip_id].chip_name,
+			   (int)inl(ioaddr + CSR5), (int)inl(ioaddr + CSR12),
+			   (int)inl(ioaddr + CSR13), (int)inl(ioaddr + CSR14),
+			   (int)inl(ioaddr + CSR15));
+		if ( ! tp->medialock  &&  tp->mtable) {
+			/* Step backwards through the media table, skipping the
+			   full-duplex-only entries. */
+			do
+				--tp->cur_index;
+			while (tp->cur_index >= 0
+				   && (media_cap[tp->mtable->mleaf[tp->cur_index].media]
+					   & MediaIsFD));
+			if (tp->cur_index < 0) {
+				/* We start again, but should instead look for default. */
+				tp->cur_index = tp->mtable->leafcount - 1;
+			}
+			select_media(dev, 0);
+			printk(KERN_WARNING "%s: transmit timed out, switching to %s "
+				   "media.\n", dev->name, medianame[dev->if_port]);
+		}
+		break;
+	case PNIC2:
+		printk(KERN_WARNING "%s: PNIC2 transmit timed out, status %8.8x, "
+			   "CSR6/7 %8.8x / %8.8x CSR12 %8.8x, resetting...\n",
+			   dev->name, (int)inl(ioaddr + CSR5), (int)inl(ioaddr + CSR6),
+			   (int)inl(ioaddr + CSR7), (int)inl(ioaddr + CSR12));
+		break;
+	default:
+		printk(KERN_WARNING "%s: Transmit timed out, status %8.8x, CSR12 "
+			   "%8.8x, resetting...\n",
+			   dev->name, (int)inl(ioaddr + CSR5), (int)inl(ioaddr + CSR12));
+	}
+
+#if defined(way_too_many_messages)  &&  defined(__i386__)
+	if (tp->msg_level & NETIF_MSG_TXERR) {
+		int i;
+		for (i = 0; i < RX_RING_SIZE; i++) {
+			u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
+			int j;
+			printk(KERN_DEBUG "%2d: %8.8x %8.8x %8.8x %8.8x  "
+				   "%2.2x %2.2x %2.2x.\n",
+				   i, (unsigned int)tp->rx_ring[i].status,
+				   (unsigned int)tp->rx_ring[i].length,
+				   (unsigned int)tp->rx_ring[i].buffer1,
+				   (unsigned int)tp->rx_ring[i].buffer2,
+				   buf[0], buf[1], buf[2]);
+			/* Bounds test first: buf[j] must not be read at j == 1600. */
+			for (j = 0; j < 1600 && buf[j] != 0xee; j++)
+				if (j < 100) printk(" %2.2x", buf[j]);
+			printk(" j=%d.\n", j);
+		}
+		printk(KERN_DEBUG "  Rx ring %8.8x: ", (int)tp->rx_ring);
+		for (i = 0; i < RX_RING_SIZE; i++)
+			printk(" %8.8x", (unsigned int)tp->rx_ring[i].status);
+		printk("\n" KERN_DEBUG "  Tx ring %8.8x: ", (int)tp->tx_ring);
+		for (i = 0; i < TX_RING_SIZE; i++)
+			printk(" %8.8x", (unsigned int)tp->tx_ring[i].status);
+		printk("\n");
+	}
+#endif
+
+	/* Stop and restart the Tx process.
+	   The pwr_event approach of empty/init_rings() may be better... */
+	outl(tp->csr6 | RxOn, ioaddr + CSR6);
+	outl(tp->csr6 | RxOn | TxOn, ioaddr + CSR6);
+	/* Trigger an immediate transmit demand. */
+	outl(0, ioaddr + CSR1);
+	outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+
+	dev->trans_start = jiffies;
+	tp->stats.tx_errors++;
+	return;
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits.
+   The Rx descriptors are chained through buffer2 with the last entry
+   wrapping back to the first; only entries that received a buffer are
+   handed to the chip (DescOwned). */
+static void tulip_init_ring(struct net_device *dev)
+{
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	int i;
+
+	tp->rx_dead = tp->tx_full = 0;
+	tp->cur_rx = tp->cur_tx = 0;
+	tp->dirty_rx = tp->dirty_tx = 0;
+
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		tp->rx_ring[i].status = 0x00000000;
+		tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
+		tp->rx_ring[i].buffer2 = virt_to_le32desc(&tp->rx_ring[i+1]);
+		tp->rx_skbuff[i] = NULL;
+	}
+	/* Mark the last entry as wrapping the ring. */
+	tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
+	tp->rx_ring[i-1].buffer2 = virt_to_le32desc(&tp->rx_ring[0]);
+
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		/* Note the receive buffer must be longword aligned.
+		   dev_alloc_skb() provides 16 byte alignment.  But do *not*
+		   use skb_reserve() to align the IP header! */
+		struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
+		tp->rx_skbuff[i] = skb;
+		if (skb == NULL)
+			break;
+		skb->dev = dev;			/* Mark as being used by this device. */
+		tp->rx_ring[i].status = cpu_to_le32(DescOwned);
+		tp->rx_ring[i].buffer1 = virt_to_le32desc(skb->tail);
+	}
+	/* If allocations fell short this leaves dirty_rx "behind" cur_rx so
+	   the Rx refill logic will keep retrying the missing entries. */
+	tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+	/* The Tx buffer descriptor is filled in as needed, but we
+	   do need to clear the ownership bit. */
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		tp->tx_skbuff[i] = 0;
+		tp->tx_ring[i].status = 0x00000000;
+		tp->tx_ring[i].buffer2 = virt_to_le32desc(&tp->tx_ring[i+1]);
+	}
+	tp->tx_ring[i-1].buffer2 = virt_to_le32desc(&tp->tx_ring[0]);
+}
+
+/* Queue one packet for transmission.  Fills the next Tx descriptor and
+   hands it to the chip; Tx-done interrupts are requested on only some
+   descriptors to limit the interrupt rate.  Returns 0 on success, 1 if
+   the queue was paused. */
+static int
+tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	int entry, q_used_cnt;
+	u32 flag;
+
+	/* Block a timer-based transmit from overlapping.  This happens when
+	   packets are presumed lost, and we use this check the Tx status. */
+	if (netif_pause_tx_queue(dev) != 0) {
+		/* This watchdog code is redundant with the media monitor timer. */
+		if (jiffies - dev->trans_start > TX_TIMEOUT)
+			tulip_tx_timeout(dev);
+		return 1;
+	}
+
+	/* Caution: the write order is important here, set the field
+	   with the ownership bits last. */
+
+	/* Calculate the next Tx descriptor entry. */
+	entry = tp->cur_tx % TX_RING_SIZE;
+	q_used_cnt = tp->cur_tx - tp->dirty_tx;
+
+	tp->tx_skbuff[entry] = skb;
+	tp->tx_ring[entry].buffer1 = virt_to_le32desc(skb->data);
+
+	if (q_used_cnt < TX_QUEUE_LEN/2) {/* Typical path */
+		flag = 0x60000000; /* No interrupt */
+	} else if (q_used_cnt == TX_QUEUE_LEN/2) {
+		flag = 0xe0000000; /* Tx-done intr. */
+	} else if (q_used_cnt < TX_QUEUE_LEN) {
+		flag = 0x60000000; /* No Tx-done intr. */
+	} else {		/* Leave room for set_rx_mode() to fill entries. */
+		tp->tx_full = 1;
+		flag = 0xe0000000; /* Tx-done intr. */
+	}
+	/* The last ring entry always interrupts and carries the wrap bit. */
+	if (entry == TX_RING_SIZE-1)
+		flag = 0xe0000000 | DESC_RING_WRAP;
+
+	tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
+	tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
+	tp->cur_tx++;
+	if ( ! tp->tx_full)
+		netif_unpause_tx_queue(dev);
+	else {
+		netif_stop_tx_queue(dev);
+		/* Check for a just-cleared queue race.
+		   Note that this code path differs from other drivers because we
+		   set the tx_full flag early. */
+		if ( ! tp->tx_full)
+			netif_resume_tx_queue(dev);
+	}
+
+	dev->trans_start = jiffies;
+	/* Trigger an immediate transmit demand. */
+	outl(0, dev->base_addr + CSR1);
+
+	return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+   after the Tx thread.  Loops acknowledging CSR5 status until no
+   NormalIntr/AbnormalIntr bit remains or the work budget is spent, in
+   which case interrupts are masked and a timer re-enables them. */
+static void tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *)dev_instance;
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int csr5, work_budget = max_interrupt_work;
+
+	do {
+		csr5 = inl(ioaddr + CSR5);
+		if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
+			break;
+
+		if (tp->msg_level & NETIF_MSG_INTR)
+			printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
+				   dev->name, csr5, (int)inl(dev->base_addr + CSR5));
+		/* Acknowledge all of the current interrupt sources ASAP. */
+		outl(csr5 & 0x0001ffff, ioaddr + CSR5);
+
+		if (csr5 & (RxIntr | RxNoBuf))
+			work_budget -= tulip_rx(dev);
+
+		if (csr5 & (TxNoBuf | TxDied | TxIntr)) {
+			unsigned int dirty_tx;
+
+			/* Reap completed Tx descriptors, tallying statistics. */
+			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
+				 dirty_tx++) {
+				int entry = dirty_tx % TX_RING_SIZE;
+				int status = le32_to_cpu(tp->tx_ring[entry].status);
+
+				if (status < 0)
+					break;			/* It still has not been Txed */
+				/* Check for Rx filter setup frames. */
+				if (tp->tx_skbuff[entry] == NULL)
+				  continue;
+
+				if (status & 0x8000) {
+					/* There was an major error, log it. */
+					if (tp->msg_level & NETIF_MSG_TX_ERR)
+						printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+							   dev->name, status);
+					tp->stats.tx_errors++;
+					if (status & 0x4104) tp->stats.tx_aborted_errors++;
+					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
+					if (status & 0x0200) tp->stats.tx_window_errors++;
+					if (status & 0x0002) tp->stats.tx_fifo_errors++;
+					if ((status & 0x0080) && tp->full_duplex == 0)
+						tp->stats.tx_heartbeat_errors++;
+#ifdef ETHER_STATS
+					if (status & 0x0100) tp->stats.collisions16++;
+#endif
+				} else {
+					if (tp->msg_level & NETIF_MSG_TX_DONE)
+						printk(KERN_DEBUG "%s: Transmit complete, status "
+							   "%8.8x.\n", dev->name, status);
+#ifdef ETHER_STATS
+					if (status & 0x0001) tp->stats.tx_deferred++;
+#endif
+#if LINUX_VERSION_CODE > 0x20127
+					tp->stats.tx_bytes += tp->tx_skbuff[entry]->len;
+#endif
+					tp->stats.collisions += (status >> 3) & 15;
+					tp->stats.tx_packets++;
+				}
+
+				/* Free the original skb. */
+				dev_free_skb_irq(tp->tx_skbuff[entry]);
+				tp->tx_skbuff[entry] = 0;
+			}
+
+#ifndef final_version
+			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
+				printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+					   dev->name, dirty_tx, tp->cur_tx, tp->tx_full);
+				dirty_tx += TX_RING_SIZE;
+			}
+#endif
+
+			if (tp->tx_full && tp->cur_tx - dirty_tx  < TX_QUEUE_LEN - 4) {
+				/* The ring is no longer full, clear tbusy. */
+				tp->tx_full = 0;
+				netif_resume_tx_queue(dev);
+			}
+
+			tp->dirty_tx = dirty_tx;
+		}
+
+		/* Rx stalled for lack of buffers: retry, restart Rx poll once
+		   enough of the ring has been refilled. */
+		if (tp->rx_dead) {
+			tulip_rx(dev);
+			if (tp->cur_rx - tp->dirty_rx < RX_RING_SIZE - 3) {
+				printk(KERN_ERR "%s: Restarted Rx at %d / %d.\n",
+					   dev->name, tp->cur_rx, tp->dirty_rx);
+				outl(0, ioaddr + CSR2);		/* Rx poll demand */
+				tp->rx_dead = 0;
+			}
+		}
+
+		/* Log errors. */
+		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
+			if (csr5 == 0xffffffff)
+				break;			/* All-ones read: card presumably gone. */
+			if (csr5 & TxJabber) tp->stats.tx_errors++;
+			if (csr5 & PCIBusError) {
+				printk(KERN_ERR "%s: PCI Fatal Bus Error, %8.8x.\n",
+					   dev->name, csr5);
+			}
+			if (csr5 & TxFIFOUnderflow) {
+				if ((tp->csr6 & 0xC000) != 0xC000)
+					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
+				else
+					tp->csr6 |= 0x00200000;  /* Store-n-forward. */
+				if (tp->msg_level & NETIF_MSG_TX_ERR)
+					printk(KERN_WARNING "%s: Tx threshold increased, "
+						   "new CSR6 %x.\n", dev->name, tp->csr6);
+			}
+			if (csr5 & TxDied) {
+				/* This is normal when changing Tx modes. */
+				if (tp->msg_level & NETIF_MSG_LINK)
+					printk(KERN_WARNING "%s: The transmitter stopped."
+						   "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
+						   dev->name, csr5, (int)inl(ioaddr + CSR6), tp->csr6);
+			}
+			if (csr5 & (TxDied | TxFIFOUnderflow | PCIBusError)) {
+				/* Restart the transmit process. */
+				outl(tp->csr6 | RxOn, ioaddr + CSR6);
+				outl(tp->csr6 | RxOn | TxOn, ioaddr + CSR6);
+			}
+			if (csr5 & (RxStopped | RxNoBuf)) {
+				/* Missed a Rx frame or mode change. */
+				tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
+				if (tp->flags & COMET_MAC_ADDR) {
+					outl(tp->mc_filter[0], ioaddr + 0xAC);
+					outl(tp->mc_filter[1], ioaddr + 0xB0);
+				}
+				tulip_rx(dev);
+				if (csr5 & RxNoBuf)
+					tp->rx_dead = 1;
+				outl(tp->csr6 | RxOn | TxOn, ioaddr + CSR6);
+			}
+			if (csr5 & TimerInt) {
+				if (tp->msg_level & NETIF_MSG_INTR)
+					printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
+						   dev->name, csr5);
+				outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+			}
+			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
+				if (tp->link_change)
+					(tp->link_change)(dev, csr5);
+			}
+			/* Clear all error sources, included undocumented ones! */
+			outl(0x0800f7ba, ioaddr + CSR5);
+		}
+		if (--work_budget < 0) {
+			if (tp->msg_level & NETIF_MSG_DRV)
+				printk(KERN_WARNING "%s: Too much work during an interrupt, "
+					   "csr5=0x%8.8x.\n", dev->name, csr5);
+			/* Acknowledge all interrupt sources. */
+			outl(0x8001ffff, ioaddr + CSR5);
+			if (tp->flags & HAS_INTR_MITIGATION) {
+				/* Josip Loncaric at ICASE did extensive experimentation
+				   to develop a good interrupt mitigation setting.*/
+				outl(0x8b240000, ioaddr + CSR11);
+			} else {
+				/* Mask all interrupting sources, set timer to re-enable. */
+				outl(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt,
+					 ioaddr + CSR7);
+				outl(0x0012, ioaddr + CSR11);
+			}
+			break;
+		}
+	} while (1);
+
+	if (tp->msg_level & NETIF_MSG_INTR)
+		printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
+			   dev->name, (int)inl(ioaddr + CSR5));
+
+	return;
+}
+
+/* Receive handler: walk the Rx ring passing completed frames up the
+   stack (copying frames shorter than rx_copybreak into a fresh skb),
+   then refill the ring.  Returns a count of the work done. */
+static int tulip_rx(struct net_device *dev)
+{
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	int entry = tp->cur_rx % RX_RING_SIZE;
+	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
+	int work_done = 0;
+
+	if (tp->msg_level & NETIF_MSG_RX_STATUS)
+		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
+			   tp->rx_ring[entry].status);
+	/* If we own the next entry, it is a new packet. Send it up. */
+	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
+		s32 status = le32_to_cpu(tp->rx_ring[entry].status);
+
+		if (tp->msg_level & NETIF_MSG_RX_STATUS)
+			printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
+				   dev->name, entry, status);
+		if (--rx_work_limit < 0)
+			break;
+		if ((status & 0x38008300) != 0x0300) {
+			if ((status & 0x38000300) != 0x0300) {
+				/* Ignore earlier buffers of a multi-buffer frame. */
+				if ((status & 0xffff) != 0x7fff) {
+					if (tp->msg_level & NETIF_MSG_RX_ERR)
+						printk(KERN_WARNING "%s: Oversized Ethernet frame "
+							   "spanned multiple buffers, status %8.8x!\n",
+							   dev->name, status);
+					tp->stats.rx_length_errors++;
+				}
+			} else if (status & RxDescFatalErr) {
+				/* There was a fatal error. */
+				if (tp->msg_level & NETIF_MSG_RX_ERR)
+					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
+						   dev->name, status);
+				tp->stats.rx_errors++; /* end of a packet.*/
+				if (status & 0x0890) tp->stats.rx_length_errors++;
+				if (status & 0x0004) tp->stats.rx_frame_errors++;
+				if (status & 0x0002) tp->stats.rx_crc_errors++;
+				if (status & 0x0001) tp->stats.rx_fifo_errors++;
+			}
+		} else {
+			/* Omit the four octet CRC from the length. */
+			short pkt_len = ((status >> 16) & 0x7ff) - 4;
+			struct sk_buff *skb;
+
+#ifndef final_version
+			if (pkt_len > 1518) {
+				printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
+					   dev->name, pkt_len, pkt_len);
+				pkt_len = 1518;
+				tp->stats.rx_length_errors++;
+			}
+#endif
+			/* Check if the packet is long enough to accept without copying
+			   to a minimally-sized skbuff. */
+			if (pkt_len < rx_copybreak
+				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+				skb->dev = dev;
+				skb_reserve(skb, 2);	/* 16 byte align the IP header */
+#if (LINUX_VERSION_CODE >= 0x20100)
+				eth_copy_and_sum(skb, tp->rx_skbuff[entry]->tail, pkt_len, 0);
+				skb_put(skb, pkt_len);
+#else
+				memcpy(skb_put(skb, pkt_len), tp->rx_skbuff[entry]->tail,
+					   pkt_len);
+#endif
+				work_done++;
+			} else {	/* Pass up the skb already on the Rx ring. */
+				skb_put(skb = tp->rx_skbuff[entry], pkt_len);
+				tp->rx_skbuff[entry] = NULL;
+			}
+			skb->protocol = eth_type_trans(skb, dev);
+			netif_rx(skb);
+			dev->last_rx = jiffies;
+			tp->stats.rx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+			tp->stats.rx_bytes += pkt_len;
+#endif
+		}
+		entry = (++tp->cur_rx) % RX_RING_SIZE;
+	}
+
+	/* Refill the Rx ring buffers. */
+	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
+		entry = tp->dirty_rx % RX_RING_SIZE;
+		if (tp->rx_skbuff[entry] == NULL) {
+			struct sk_buff *skb;
+			skb = tp->rx_skbuff[entry] = dev_alloc_skb(PKT_BUF_SZ);
+			if (skb == NULL) {
+				if (tp->cur_rx - tp->dirty_rx == RX_RING_SIZE)
+					printk(KERN_ERR "%s: No kernel memory to allocate "
+						   "receive buffers.\n", dev->name);
+				break;
+			}
+			skb->dev = dev;			/* Mark as being used by this device. */
+			tp->rx_ring[entry].buffer1 = virt_to_le32desc(skb->tail);
+			work_done++;
+		}
+		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
+	}
+
+	return work_done;
+}
+
+/* Release every Rx and Tx buffer and neutralize the Rx descriptors so
+   the chip (which the caller must already have stopped) cannot DMA
+   into freed memory. */
+static void empty_rings(struct net_device *dev)
+{
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	int i;
+
+	/* Free all the skbuffs in the Rx queue. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		struct sk_buff *skb = tp->rx_skbuff[i];
+		tp->rx_skbuff[i] = 0;
+		tp->rx_ring[i].status = 0;		/* Not owned by Tulip chip. */
+		tp->rx_ring[i].length = 0;
+		tp->rx_ring[i].buffer1 = 0xBADF00D0; /* An invalid address. */
+		if (skb) {
+#if LINUX_VERSION_CODE < 0x20100
+			skb->free = 1;
+#endif
+			dev_free_skb(skb);
+		}
+	}
+	/* Free any skbs still waiting in the Tx ring. */
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		if (tp->tx_skbuff[i])
+			dev_free_skb(tp->tx_skbuff[i]);
+		tp->tx_skbuff[i] = 0;
+	}
+}
+
+/* Shut the interface down: mask interrupts, stop the Rx/Tx engines,
+   harvest the final missed-frame count, then release the IRQ, timer
+   and ring buffers.  Always returns 0. */
+static int tulip_close(struct net_device *dev)
+{
+	long ioaddr = dev->base_addr;
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+
+	netif_stop_tx_queue(dev);
+
+	if (tp->msg_level & NETIF_MSG_IFDOWN)
+		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
+			   dev->name, (int)inl(ioaddr + CSR5));
+
+	/* Disable interrupts by clearing the interrupt mask. */
+	outl(0x00000000, ioaddr + CSR7);
+	/* Stop the Tx and Rx processes. */
+	outl(inl(ioaddr + CSR6) & ~TxOn & ~RxOn, ioaddr + CSR6);
+	/* 21040 -- Leave the card in 10baseT state. */
+	if (tp->chip_id == DC21040)
+		outl(0x00000004, ioaddr + CSR13);
+
+	/* Skip the counter read on an all-ones read -- presumably the card
+	   has been removed. */
+	if (inl(ioaddr + CSR6) != 0xffffffff)
+		tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
+
+	del_timer(&tp->timer);
+
+	free_irq(dev->irq, dev);
+
+	dev->if_port = tp->saved_if_port;
+
+	empty_rings(dev);
+	/* Leave the driver in snooze, not sleep, mode. */
+	if (tp->flags & HAS_PWRDWN)
+		pci_write_config_dword(tp->pci_dev, 0x40, 0x40000000);
+
+	MOD_DEC_USE_COUNT;
+
+	return 0;
+}
+
+/* Return the interface statistics, first folding in the frames the
+   chip counted as missed (low 16 bits of CSR8) while the device is
+   running and still present. */
+static struct net_device_stats *tulip_get_stats(struct net_device *dev)
+{
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	int missed = inl(dev->base_addr + CSR8);
+
+	if (netif_running(dev)  &&  missed != 0xffffffff)
+		tp->stats.rx_missed_errors += (u16)missed;
+
+	return &tp->stats;
+}
+
+#ifdef HAVE_PRIVATE_IOCTL
+/* Provide ioctl() calls to examine the MII xcvr state.
+   We emulate a MII management registers for chips without MII.
+   The two numeric constants are because some clueless person
+   changed value for the symbolic name.
+ */
+/* Driver-private ioctl()s: the numeric MII pairs described in the comment
+   above, plus SIOCG/SPARAMS for the driver tunables.  Chips without a real
+   MII transceiver get an emulated register set synthesized from
+   CSR6/CSR12/CSR14. */
+static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	/* MII request layout inside ifr_data: data[0] phy id, data[1] register
+	   number, data[2] value to write, data[3] value read back. */
+	u16 *data = (u16 *)&rq->ifr_data;
+	u32 *data32 = (void *)&rq->ifr_data;
+	unsigned int phy = tp->phys[0];
+	unsigned int regnum = data[1];
+
+	switch(cmd) {
+	case 0x8947: case 0x89F0:
+		/* SIOCGMIIPHY: Get the address of the PHY in use. */
+		if (tp->mii_cnt)
+			data[0] = phy;
+		else if (tp->flags & HAS_NWAY)
+			data[0] = 32;		/* Pseudo-address of the NWay emulation. */
+		else if (tp->chip_id == COMET)
+			data[0] = 1;
+		else
+			return -ENODEV;
+		/* Fall through: also return the contents of register data[1]. */
+	case 0x8948: case 0x89F1:
+		/* SIOCGMIIREG: Read the specified MII register. */
+		if (data[0] == 32  &&  (tp->flags & HAS_NWAY)) {
+			/* No MII xcvr: synthesize the registers from the SIA CSRs. */
+			int csr12 = inl(ioaddr + CSR12);
+			int csr14 = inl(ioaddr + CSR14);
+			switch (regnum) {
+			case 0:
+				if (((csr14<<5) & 0x1000) ||
+					(dev->if_port == 5 && tp->nwayset))
+					data[3] = 0x1000;
+				else
+					data[3] = (media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
+						| (media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
+				break;
+			case 1:
+				data[3] = 0x1848 + ((csr12&0x7000) == 0x5000 ? 0x20 : 0)
+					+ ((csr12&0x06) == 6 ? 0 : 4);
+				if (tp->chip_id != DC21041)
+					data[3] |= 0x6048;
+				break;
+			case 4: {
+				/* Advertised value, bogus 10baseTx-FD value from CSR6. */
+				data[3] = ((inl(ioaddr + CSR6)>>3)&0x0040)+((csr14>>1)&0x20)+1;
+				if (tp->chip_id != DC21041)
+					 data[3] |= ((csr14>>9)&0x03C0);
+				break;
+			}
+			case 5: data[3] = tp->lpar; break;
+			default: data[3] = 0; break;
+			}
+		} else {
+			data[3] = mdio_read(dev, data[0] & 0x1f, regnum);
+		}
+		return 0;
+	case 0x8949: case 0x89F2:
+		/* SIOCSMIIREG: Write the specified MII register */
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (regnum & ~0x1f)
+			return -EINVAL;
+		if (data[0] == phy) {
+			/* Shadow duplex/advertise settings written to the real PHY. */
+			u16 value = data[2];
+			switch (regnum) {
+			case 0: /* Check for autonegotiation on or reset. */
+				tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
+				if (tp->full_duplex_lock)
+					tp->full_duplex = (value & 0x0100) ? 1 : 0;
+				break;
+			case 4: tp->mii_advertise = data[2]; break;
+			}
+		}
+		if (data[0] == 32  &&  (tp->flags & HAS_NWAY)) {
+			u16 value = data[2];
+			if (regnum == 0) {
+				if ((value & 0x1200) == 0x1200)
+					nway_start(dev);
+			} else if (regnum == 4)
+				tp->sym_advertise = value;
+		} else {
+			mdio_write(dev, data[0] & 0x1f, regnum, data[2]);
+		}
+		return 0;
+	case SIOCGPARAMS:
+		/* Export the driver tunables and the current CSR11 setting. */
+		data32[0] = tp->msg_level;
+		data32[1] = multicast_filter_limit;
+		data32[2] = max_interrupt_work;
+		data32[3] = rx_copybreak;
+		data32[4] = inl(ioaddr + CSR11);
+		return 0;
+	case SIOCSPARAMS:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		tp->msg_level = data32[0];
+		multicast_filter_limit = data32[1];
+		max_interrupt_work = data32[2];
+		rx_copybreak = data32[3];
+		if (tp->flags & HAS_INTR_MITIGATION) {
+			outl(data32[4], ioaddr + CSR11);
+			/* Bug fix: report the CSR11 value just written (data32[4]);
+			   the old code printed the first word of the request. */
+			printk(KERN_NOTICE "%s: Set interrupt mitigate parameters %8.8x.\n",
+				   dev->name, data32[4]);
+		}
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return -EOPNOTSUPP;
+}
+#endif  /* HAVE_PRIVATE_IOCTL */
+
+/* Set or clear the multicast filter for this adaptor.
+   Note that we only use exclusion around actually queueing the
+   new frame, not around filling tp->setup_frame.  This is non-deterministic
+   when re-entered but still correct. */
+
+/* The little-endian AUTODIN32 ethernet CRC calculation.
+   N.B. Do not use for bulk data, use a table-based routine instead.
+   This is common code and should be moved to net/core/crc.c */
+/* Reflected AUTODIN-II polynomial, for LSB-first CRC computation. */
+static unsigned const ethernet_polynomial_le = 0xedb88320U;
+/* Little-endian (reflected) CRC-32 over 'length' bytes, computed one bit
+   at a time.  Initial value 0xffffffff, no final inversion.  Used by
+   set_rx_mode() to index the multicast hash filters. */
+static inline u32 ether_crc_le(int length, unsigned char *data)
+{
+	u32 crc = 0xffffffff;	/* Initial value. */
+	int i;
+
+	for (i = 0; i < length; i++) {
+		unsigned char octet = data[i];
+		int bit;
+		for (bit = 0; bit < 8; bit++) {
+			if ((crc ^ octet) & 1)
+				crc = (crc >> 1) ^ ethernet_polynomial_le;
+			else
+				crc >>= 1;
+			octet >>= 1;
+		}
+	}
+	return crc;
+}
+/* Big-endian AUTODIN-II polynomial. */
+static unsigned const ethernet_polynomial = 0x04c11db7U;
+/* MSB-first CRC-32 over 'length' bytes; the bits of each octet are fed
+   in LSB-first.  set_rx_mode() uses the top six bits of this CRC to
+   index the 64-entry hash table on non-Comet work-alike chips. */
+static inline u32 ether_crc(int length, unsigned char *data)
+{
+	int crc = -1;
+	int i;
+
+	for (i = 0; i < length; i++) {
+		unsigned char octet = data[i];
+		int bit;
+		for (bit = 0; bit < 8; bit++) {
+			crc = (crc << 1) ^
+				((crc < 0) ^ (octet & 1) ? ethernet_polynomial : 0);
+			octet >>= 1;
+		}
+	}
+	return crc;
+}
+
+/* Program the Rx address filter from dev->flags and dev->mc_list.
+   Depending on the chip this means setting promiscuous/all-multicast mode
+   bits in CSR6, loading a 64-bit hash table (hash-only work-alikes), or
+   queueing a 192-byte setup frame on the Tx ring (genuine Tulips). */
+static void set_rx_mode(struct net_device *dev)
+{
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	/* Clear the 0x00D5 filter-mode bits before choosing a new mode. */
+	int csr6 = inl(ioaddr + CSR6) & ~0x00D5;
+
+	tp->csr6 &= ~0x00D5;
+	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
+		tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
+		csr6 |= AcceptAllMulticast | AcceptAllPhys;
+		/* Unconditionally log net taps. */
+		printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
+	} else if ((dev->mc_count > 1000)  ||  (dev->flags & IFF_ALLMULTI)) {
+		/* Too many to filter well -- accept all multicasts. */
+		tp->csr6 |= AcceptAllMulticast;
+		csr6 |= AcceptAllMulticast;
+	} else	if (tp->flags & MC_HASH_ONLY) {
+		/* Some work-alikes have only a 64-entry hash filter table. */
+		/* Should verify correctness on big-endian/__powerpc__ */
+		struct dev_mc_list *mclist;
+		int i;
+		if (dev->mc_count > multicast_filter_limit) {
+			tp->csr6 |= AcceptAllMulticast;
+			csr6 |= AcceptAllMulticast;
+		} else {
+			u32 mc_filter[2] = {0, 0};		 /* Multicast hash filter */
+			int filterbit;
+			for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+				 i++, mclist = mclist->next) {
+				/* Comet-style parts hash on the little-endian CRC, the
+				   others on the top bits of the big-endian CRC. */
+				if (tp->flags & COMET_MAC_ADDR)
+					filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
+				else
+					filterbit = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+				filterbit &= 0x3f;		/* 64-entry table: 6 index bits. */
+				set_bit(filterbit, mc_filter);
+				if (tp->msg_level & NETIF_MSG_RXFILTER)
+					printk(KERN_INFO "%s: Added filter for %2.2x:%2.2x:%2.2x:"
+						   "%2.2x:%2.2x:%2.2x  %8.8x bit %d.\n", dev->name,
+						   mclist->dmi_addr[0], mclist->dmi_addr[1],
+						   mclist->dmi_addr[2], mclist->dmi_addr[3],
+						   mclist->dmi_addr[4], mclist->dmi_addr[5],
+						   ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit);
+			}
+			/* Only touch the chip when the hash actually changed. */
+			if (mc_filter[0] == tp->mc_filter[0]  &&
+				mc_filter[1] == tp->mc_filter[1])
+				;				/* No change. */
+			else if (tp->flags & IS_ASIX) {
+				/* ASIX: hash words are loaded indirectly through
+				   CSR13 (index) / CSR14 (data). */
+				outl(2, ioaddr + CSR13);
+				outl(mc_filter[0], ioaddr + CSR14);
+				outl(3, ioaddr + CSR13);
+				outl(mc_filter[1], ioaddr + CSR14);
+			} else if (tp->flags & COMET_MAC_ADDR) {
+				/* Comet: memory-mapped hash registers at 0xAC/0xB0. */
+				outl(mc_filter[0], ioaddr + 0xAC);
+				outl(mc_filter[1], ioaddr + 0xB0);
+			}
+			tp->mc_filter[0] = mc_filter[0];
+			tp->mc_filter[1] = mc_filter[1];
+		}
+	} else {
+		/* Genuine Tulip: build a setup frame (16 entries of 12 bytes =
+		   192 bytes) and queue it on the Tx ring. */
+		u16 *eaddrs, *setup_frm = tp->setup_frame;
+		struct dev_mc_list *mclist;
+		u32 tx_flags = 0x08000000 | 192;	/* Setup frame, 192 byte buffer. */
+		int i;
+
+		/* Note that only the low-address shortword of setup_frame is valid!
+		   The values are doubled for big-endian architectures. */
+		if (dev->mc_count > 14) { /* Must use a multicast hash table. */
+			u16 hash_table[32];		/* 512-bit hash filter. */
+			tx_flags = 0x08400000 | 192;		/* Use hash filter. */
+			memset(hash_table, 0, sizeof(hash_table));
+			set_bit(255, hash_table);			/* Broadcast entry */
+			/* This should work on big-endian machines as well. */
+			for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+				 i++, mclist = mclist->next)
+				set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff,
+						hash_table);
+			/* Each 16-bit word goes in the low shortword of a 32-bit
+			   setup-frame slot, hence the duplicated stores. */
+			for (i = 0; i < 32; i++) {
+				*setup_frm++ = hash_table[i];
+				*setup_frm++ = hash_table[i];
+			}
+			setup_frm = &tp->setup_frame[13*6];
+		} else {
+			/* We have <= 14 addresses so we can use the wonderful
+			   16 address perfect filtering of the Tulip. */
+			for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
+				 i++, mclist = mclist->next) {
+				eaddrs = (u16 *)mclist->dmi_addr;
+				*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
+				*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
+				*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
+			}
+			/* Fill the unused entries with the broadcast address. */
+			memset(setup_frm, 0xff, (15-i)*12);
+			setup_frm = &tp->setup_frame[15*6];
+		}
+		/* Fill the final entry with our physical address. */
+		eaddrs = (u16 *)dev->dev_addr;
+		*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
+		*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
+		*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
+		/* Now add this frame to the Tx list. */
+		if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
+			/* Same setup recently queued, we need not add it. */
+		} else {
+			unsigned long flags;
+			unsigned int entry;
+
+			spin_lock_irqsave(&tp->mii_lock, flags);
+			entry = tp->cur_tx++ % TX_RING_SIZE;
+
+			if (entry != 0) {
+				/* Avoid a chip errata by prefixing a dummy entry. */
+				tp->tx_skbuff[entry] = 0;
+				tp->tx_ring[entry].length =
+					(entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP):0;
+				tp->tx_ring[entry].buffer1 = 0;
+				tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
+				entry = tp->cur_tx++ % TX_RING_SIZE;
+			}
+
+			tp->tx_skbuff[entry] = 0;
+			/* Put the setup frame on the Tx list. */
+			if (entry == TX_RING_SIZE-1)
+				tx_flags |= DESC_RING_WRAP;		/* Wrap ring. */
+			tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
+			tp->tx_ring[entry].buffer1 = virt_to_le32desc(tp->setup_frame);
+			tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
+			/* Ring may now be full: stop the queue until Tx reaps it. */
+			if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2) {
+				netif_stop_tx_queue(dev);
+				tp->tx_full = 1;
+			}
+			spin_unlock_irqrestore(&tp->mii_lock, flags);
+			/* Trigger an immediate transmit demand. */
+			outl(0, ioaddr + CSR1);
+		}
+	}
+	outl(csr6, ioaddr + CSR6);
+}
+
+
+/* Power-management / hot-plug event handler used by the CardBus shim:
+   module refcounting (ATTACH), quiescing the chip (SUSPEND),
+   re-initialization (RESUME) and full teardown (DETACH).
+   Always returns 0. */
+static int tulip_pwr_event(void *dev_instance, int event)
+{
+	struct net_device *dev = dev_instance;
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	if (tp->msg_level & NETIF_MSG_LINK)
+		printk("%s: Handling power event %d.\n", dev->name, event);
+	switch(event) {
+	case DRV_ATTACH:
+		MOD_INC_USE_COUNT;
+		break;
+	case DRV_SUSPEND: {
+		int csr6 = inl(ioaddr + CSR6);
+		/* Disable interrupts, stop the chip, gather stats. */
+		if (csr6 != 0xffffffff) {	/* All-ones: card presumably gone. */
+			int csr8 = inl(ioaddr + CSR8);
+			outl(0x00000000, ioaddr + CSR7);
+			outl(csr6 & ~TxOn & ~RxOn, ioaddr + CSR6);
+			tp->stats.rx_missed_errors += (unsigned short)csr8;
+		}
+		empty_rings(dev);
+		/* Put the 21143 into sleep mode. */
+		if (tp->flags & HAS_PWRDWN)
+			pci_write_config_dword(tp->pci_dev, 0x40,0x80000000);
+		break;
+	}
+	case DRV_RESUME:
+		/* Wake the chip, then redo the init sequence: bus mode, rings,
+		   media selection, interrupt mask and Rx filter. */
+		if (tp->flags & HAS_PWRDWN)
+			pci_write_config_dword(tp->pci_dev, 0x40, 0x0000);
+		outl(tp->csr0, ioaddr + CSR0);
+		tulip_init_ring(dev);
+		outl(virt_to_bus(tp->rx_ring), ioaddr + CSR3);
+		outl(virt_to_bus(tp->tx_ring), ioaddr + CSR4);
+		if (tp->mii_cnt) {
+			dev->if_port = 11;
+			if (tp->mtable  &&  tp->mtable->has_mii)
+				select_media(dev, 1);
+			tp->csr6 = 0x820E0000;
+			dev->if_port = 11;
+			outl(0x0000, ioaddr + CSR13);
+			outl(0x0000, ioaddr + CSR14);
+		} else if (! tp->medialock)
+			nway_start(dev);
+		else
+			select_media(dev, 1);
+		outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+		outl(tp->csr6 | TxOn | RxOn, ioaddr + CSR6);
+		outl(0, ioaddr + CSR2);		/* Rx poll demand */
+		set_rx_mode(dev);
+		break;
+	case DRV_DETACH: {
+		struct net_device **devp, **next;
+		if (dev->flags & IFF_UP) {
+			printk(KERN_ERR "%s: Tulip CardBus interface was detached while "
+				   "still active.\n", dev->name);
+			dev_close(dev);
+			dev->flags &= ~(IFF_UP|IFF_RUNNING);
+		}
+		if (tp->msg_level & NETIF_MSG_DRV)
+			printk(KERN_DEBUG "%s: Unregistering device.\n", dev->name);
+		unregister_netdev(dev);
+		release_region(dev->base_addr, pci_id_tbl[tp->chip_id].io_size);
+#ifndef USE_IO_OPS
+		iounmap((char *)dev->base_addr);
+#endif
+		/* Unlink from the driver's device list before freeing. */
+		for (devp = &root_tulip_dev; *devp; devp = next) {
+			next = &((struct tulip_private *)(*devp)->priv)->next_module;
+			if (*devp == dev) {
+				*devp = *next;
+				break;
+			}
+		}
+		if (tp->priv_addr)
+			kfree(tp->priv_addr);
+		kfree(dev);
+		MOD_DEC_USE_COUNT;
+		break;
+	}
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+#ifdef CARDBUS
+
+#include <pcmcia/driver_ops.h>
+
+/* CardBus attach entry: locate the PCI function described by 'loc', map
+   its registers, identify the chip from pci_id_tbl[] (defaulting to the
+   21143) and activate it via tulip_probe1().  Returns a dev_node_t for
+   the Card Services layer, or NULL on any failure. */
+static dev_node_t *tulip_attach(dev_locator_t *loc)
+{
+	struct net_device *dev;
+	long ioaddr;
+	struct pci_dev *pdev;
+	u8 bus, devfn, irq;
+	u32 dev_id;
+	u32 pciaddr;
+	int i, chip_id = 4;			/* DC21143 */
+
+	if (loc->bus != LOC_PCI) return NULL;
+	bus = loc->b.pci.bus; devfn = loc->b.pci.devfn;
+	printk(KERN_INFO "tulip_attach(bus %d, function %d)\n", bus, devfn);
+	pdev = pci_find_slot(bus, devfn);
+	if (pdev == NULL)			/* Bug fix: the slot may already be gone. */
+		return NULL;
+#ifdef USE_IO_OPS
+	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &pciaddr);
+	ioaddr = pciaddr & PCI_BASE_ADDRESS_IO_MASK;
+#else
+	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &pciaddr);
+	ioaddr = (long)ioremap(pciaddr & PCI_BASE_ADDRESS_MEM_MASK,
+						   pci_id_tbl[DC21142].io_size);
+#endif
+	pci_read_config_dword(pdev, 0, &dev_id);
+	pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &irq);
+	if (ioaddr == 0 || irq == 0) {
+		printk(KERN_ERR "The Tulip CardBus Ethernet interface at %d/%d was "
+			   "not assigned an %s.\n"
+			   KERN_ERR "  It will not be activated.\n",
+			   bus, devfn, ioaddr == 0 ? "address" : "IRQ");
+		return NULL;
+	}
+	/* Match the vendor/device ID word against the driver's PCI table. */
+	for (i = 0; pci_id_tbl[i].id.pci; i++) {
+		if (pci_id_tbl[i].id.pci == (dev_id & pci_id_tbl[i].id.pci_mask)) {
+			chip_id = i; break;
+		}
+	}
+	dev = tulip_probe1(pdev, NULL, ioaddr, irq, chip_id, 0);
+	if (dev) {
+		dev_node_t *node = kmalloc(sizeof(dev_node_t), GFP_KERNEL);
+		if (node == NULL)	/* Bug fix: unchecked kmalloc() crashed here.
+							   NOTE(review): the probed device is left
+							   registered; full unwind needs a detach. */
+			return NULL;
+		strcpy(node->dev_name, dev->name);
+		node->major = node->minor = 0;
+		node->next = NULL;
+		MOD_INC_USE_COUNT;
+		return node;
+	}
+	return NULL;
+}
+
+/* Card Services suspend hook: find the interface named by 'node' on the
+   driver's device list and hand it a DRV_SUSPEND power event. */
+static void tulip_suspend(dev_node_t *node)
+{
+	struct net_device *dev;
+
+	printk(KERN_INFO "tulip_suspend(%s)\n", node->dev_name);
+	for (dev = root_tulip_dev; dev != NULL;
+		 dev = ((struct tulip_private *)dev->priv)->next_module) {
+		if (strcmp(dev->name, node->dev_name) == 0) {
+			tulip_pwr_event(dev, DRV_SUSPEND);
+			break;
+		}
+	}
+}
+
+/* Card Services resume hook: find the interface named by 'node' on the
+   driver's device list and hand it a DRV_RESUME power event. */
+static void tulip_resume(dev_node_t *node)
+{
+	struct net_device *dev;
+
+	printk(KERN_INFO "tulip_resume(%s)\n", node->dev_name);
+	for (dev = root_tulip_dev; dev != NULL;
+		 dev = ((struct tulip_private *)dev->priv)->next_module) {
+		if (strcmp(dev->name, node->dev_name) == 0) {
+			tulip_pwr_event(dev, DRV_RESUME);
+			break;
+		}
+	}
+}
+
+/* Card Services detach hook: find the named interface, unregister it,
+   release its resources and free it.  Bug fix: the original freed the
+   net_device *before* reading tp->priv_addr and before unlinking through
+   'next' -- both of which live inside the freed allocation (dev->priv is
+   carved out of the net_device block), a use-after-free.  Reordered to
+   unlink and harvest pointers first, matching DRV_DETACH in
+   tulip_pwr_event(). */
+static void tulip_detach(dev_node_t *node)
+{
+	struct net_device **devp, **next;
+	printk(KERN_INFO "tulip_detach(%s)\n", node->dev_name);
+	for (devp = &root_tulip_dev; *devp; devp = next) {
+		next = &((struct tulip_private *)(*devp)->priv)->next_module;
+		if (strcmp((*devp)->name, node->dev_name) == 0) break;
+	}
+	if (*devp) {
+		struct net_device *dev = *devp;
+		struct tulip_private *tp = (struct tulip_private *)dev->priv;
+
+		unregister_netdev(dev);
+		release_region(dev->base_addr, pci_id_tbl[DC21142].io_size);
+#ifndef USE_IO_OPS
+		iounmap((char *)dev->base_addr);
+#endif
+		*devp = *next;		/* Unlink while the link is still valid. */
+		if (tp->priv_addr)
+			kfree(tp->priv_addr);
+		kfree(dev);
+		kfree(node);
+		MOD_DEC_USE_COUNT;
+	}
+}
+
+/* Dispatch table registered with the CardBus/PCMCIA activation layer;
+   "tulip_cb" is the driver name reported to Card Services. */
+struct driver_operations tulip_ops = {
+	"tulip_cb", tulip_attach, tulip_suspend, tulip_resume, tulip_detach
+};
+
+#endif  /* Cardbus support */
+
+
+#ifdef MODULE
+/* Module load: announce the driver version, then register with either the
+   CardBus activation layer or the generic PCI scan code. */
+int init_module(void)
+{
+	if (debug >= NETIF_MSG_DRV)	/* Emit version even if no cards detected. */
+		printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+#ifdef CARDBUS
+	register_driver(&tulip_ops);
+	return 0;
+#else
+	return pci_drv_register(&tulip_drv_id, NULL);
+#endif
+	/* Unreachable 'reverse_probe = 0;' statement removed: both branches
+	   above return, and the assignment could never execute. */
+}
+
+/* Module unload: unregister from the activation layer, then tear down and
+   free every device remaining on the root_tulip_dev list. */
+void cleanup_module(void)
+{
+	struct net_device *next_dev;
+
+#ifdef CARDBUS
+	unregister_driver(&tulip_ops);
+#else
+	pci_drv_unregister(&tulip_drv_id);
+#endif
+
+	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+	while (root_tulip_dev) {
+		struct tulip_private *tp = (struct tulip_private*)root_tulip_dev->priv;
+		unregister_netdev(root_tulip_dev);
+		release_region(root_tulip_dev->base_addr,
+					   pci_id_tbl[tp->chip_id].io_size);
+#ifndef USE_IO_OPS
+		iounmap((char *)root_tulip_dev->base_addr);
+#endif
+		/* Harvest the list link before kfree() invalidates tp. */
+		next_dev = tp->next_module;
+		if (tp->priv_addr)
+			kfree(tp->priv_addr);
+		kfree(root_tulip_dev);
+		root_tulip_dev = next_dev;
+	}
+}
+#else
+/* Compiled-in (non-modular) probe entry point: register with the PCI scan
+   code and announce the driver version on success. */
+int tulip_probe(struct net_device *dev)
+{
+	if (pci_drv_register(&tulip_drv_id, dev) < 0)
+		return -ENODEV;
+	printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+	return 0;
+	/* Unreachable 'reverse_probe = 0;' statement removed: the function
+	   always returns above. */
+}
+#endif  /* MODULE */
+
+/*
+ * Local variables:
+ *  compile-command: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c tulip.c"
+ *  cardbus-compile-command: "gcc -DCARDBUS -DMODULE -Wall -Wstrict-prototypes -O6 -c tulip.c -o tulip_cb.o -I/usr/src/pcmcia/include/"
+ *  c-indent-level: 4
+ *  c-basic-offset: 4
+ *  tab-width: 4
+ * End:
+ */
