1 /* $OpenBSD: if_cpsw.c,v 1.34 2016/04/13 11:33:59 mpi Exp $ */
2 /* $NetBSD: if_cpsw.c,v 1.3 2013/04/17 14:36:34 bouyer Exp $ */
5 * Copyright (c) 2013 Jonathan A. Kollasch
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
21 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
22 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
26 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
27 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org>
32 * All rights reserved.
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
43 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
47 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 #include <sys/param.h>
59 #include <sys/systm.h>
60 #include <sys/sockio.h>
63 #include <sys/queue.h>
64 #include <sys/kernel.h>
65 #include <sys/device.h>
66 #include <sys/timeout.h>
67 #include <sys/socket.h>
69 #include <machine/bus.h>
72 #include <net/if_media.h>
74 #include <netinet/in.h>
75 #include <netinet/if_ether.h>
81 #include <dev/mii/mii.h>
82 #include <dev/mii/miivar.h>
84 #include <arch/armv7/armv7/armv7var.h>
85 #include <arch/armv7/omap/sitara_cm.h>
86 #include <arch/armv7/omap/if_cpswreg.h>
88 #define CPSW_TXFRAGS 16
90 #define OMAP2SCM_MAC_ID0_LO 0x630
91 #define OMAP2SCM_MAC_ID0_HI 0x634
93 #define CPSW_CPPI_RAM_SIZE (0x2000)
94 #define CPSW_CPPI_RAM_TXDESCS_SIZE (CPSW_CPPI_RAM_SIZE/2)
95 #define CPSW_CPPI_RAM_RXDESCS_SIZE \
96 (CPSW_CPPI_RAM_SIZE - CPSW_CPPI_RAM_TXDESCS_SIZE)
97 #define CPSW_CPPI_RAM_TXDESCS_BASE (CPSW_CPPI_RAM_OFFSET + 0x0000)
98 #define CPSW_CPPI_RAM_RXDESCS_BASE \
99 (CPSW_CPPI_RAM_OFFSET + CPSW_CPPI_RAM_TXDESCS_SIZE)
101 #define CPSW_NTXDESCS (CPSW_CPPI_RAM_TXDESCS_SIZE/sizeof(struct cpsw_cpdma_bd))
102 #define CPSW_NRXDESCS (CPSW_CPPI_RAM_RXDESCS_SIZE/sizeof(struct cpsw_cpdma_bd))
104 #define CPSW_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
106 #define TXDESC_NEXT(x) cpsw_txdesc_adjust((x), 1)
107 #define TXDESC_PREV(x) cpsw_txdesc_adjust((x), -1)
109 #define RXDESC_NEXT(x) cpsw_rxdesc_adjust((x), 1)
110 #define RXDESC_PREV(x) cpsw_rxdesc_adjust((x), -1)
112 struct cpsw_ring_data
{
113 bus_dmamap_t tx_dm
[CPSW_NTXDESCS
];
114 struct mbuf
*tx_mb
[CPSW_NTXDESCS
];
115 bus_dmamap_t rx_dm
[CPSW_NRXDESCS
];
116 struct mbuf
*rx_mb
[CPSW_NRXDESCS
];
120 struct device sc_dev
;
121 bus_space_tag_t sc_bst
;
122 bus_space_handle_t sc_bsh
;
123 bus_dma_tag_t sc_bdt
;
124 bus_space_handle_t sc_bsh_txdescs
;
125 bus_space_handle_t sc_bsh_rxdescs
;
126 bus_addr_t sc_txdescs_pa
;
127 bus_addr_t sc_rxdescs_pa
;
130 struct mii_data sc_mii
;
132 struct cpsw_ring_data
*sc_rdp
;
133 volatile u_int sc_txnext
;
134 volatile u_int sc_txhead
;
135 volatile u_int sc_rxhead
;
143 bus_dmamap_t sc_txpad_dm
;
144 #define sc_txpad_pa sc_txpad_dm->dm_segs[0].ds_addr
146 volatile bool sc_txrun
;
147 volatile bool sc_rxrun
;
148 volatile bool sc_txeoq
;
149 volatile bool sc_rxeoq
;
150 struct timeout sc_tick
;
154 #define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname)
156 void cpsw_attach(struct device
*, struct device
*, void *);
158 void cpsw_start(struct ifnet
*);
159 int cpsw_ioctl(struct ifnet
*, u_long
, caddr_t
);
160 void cpsw_watchdog(struct ifnet
*);
161 int cpsw_init(struct ifnet
*);
162 void cpsw_stop(struct ifnet
*);
164 int cpsw_mii_readreg(struct device
*, int, int);
165 void cpsw_mii_writereg(struct device
*, int, int, int);
166 void cpsw_mii_statchg(struct device
*);
168 void cpsw_tick(void *);
170 int cpsw_new_rxbuf(struct cpsw_softc
* const, const u_int
);
171 int cpsw_mediachange(struct ifnet
*);
172 void cpsw_mediastatus(struct ifnet
*, struct ifmediareq
*);
174 int cpsw_rxthintr(void *);
175 int cpsw_rxintr(void *);
176 int cpsw_txintr(void *);
177 int cpsw_miscintr(void *);
179 void cpsw_get_mac_addr(struct cpsw_softc
*);
181 struct cfattach cpsw_ca
= {
182 sizeof(struct cpsw_softc
),
187 struct cfdriver cpsw_cd
= {
194 cpsw_txdesc_adjust(u_int x
, int y
)
196 return (((x
) + y
) & (CPSW_NTXDESCS
- 1));
200 cpsw_rxdesc_adjust(u_int x
, int y
)
202 return (((x
) + y
) & (CPSW_NRXDESCS
- 1));
206 cpsw_set_txdesc_next(struct cpsw_softc
* const sc
, const u_int i
, uint32_t n
)
208 const bus_size_t o
= sizeof(struct cpsw_cpdma_bd
) * i
+ 0;
209 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh_txdescs
, o
, n
);
213 cpsw_set_rxdesc_next(struct cpsw_softc
* const sc
, const u_int i
, uint32_t n
)
215 const bus_size_t o
= sizeof(struct cpsw_cpdma_bd
) * i
+ 0;
216 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh_rxdescs
, o
, n
);
220 cpsw_get_txdesc(struct cpsw_softc
* const sc
, const u_int i
,
221 struct cpsw_cpdma_bd
* const bdp
)
223 const bus_size_t o
= sizeof(struct cpsw_cpdma_bd
) * i
;
224 bus_space_read_region_4(sc
->sc_bst
, sc
->sc_bsh_txdescs
, o
,
229 cpsw_set_txdesc(struct cpsw_softc
* const sc
, const u_int i
,
230 struct cpsw_cpdma_bd
* const bdp
)
232 const bus_size_t o
= sizeof(struct cpsw_cpdma_bd
) * i
;
233 bus_space_write_region_4(sc
->sc_bst
, sc
->sc_bsh_txdescs
, o
,
238 cpsw_get_rxdesc(struct cpsw_softc
* const sc
, const u_int i
,
239 struct cpsw_cpdma_bd
* const bdp
)
241 const bus_size_t o
= sizeof(struct cpsw_cpdma_bd
) * i
;
242 bus_space_read_region_4(sc
->sc_bst
, sc
->sc_bsh_rxdescs
, o
,
247 cpsw_set_rxdesc(struct cpsw_softc
* const sc
, const u_int i
,
248 struct cpsw_cpdma_bd
* const bdp
)
250 const bus_size_t o
= sizeof(struct cpsw_cpdma_bd
) * i
;
251 bus_space_write_region_4(sc
->sc_bst
, sc
->sc_bsh_rxdescs
, o
,
255 static inline bus_addr_t
256 cpsw_txdesc_paddr(struct cpsw_softc
* const sc
, u_int x
)
258 KASSERT(x
< CPSW_NTXDESCS
);
259 return sc
->sc_txdescs_pa
+ sizeof(struct cpsw_cpdma_bd
) * x
;
262 static inline bus_addr_t
263 cpsw_rxdesc_paddr(struct cpsw_softc
* const sc
, u_int x
)
265 KASSERT(x
< CPSW_NRXDESCS
);
266 return sc
->sc_rxdescs_pa
+ sizeof(struct cpsw_cpdma_bd
) * x
;
270 cpsw_get_mac_addr(struct cpsw_softc
*sc
)
272 struct arpcom
*ac
= &sc
->sc_ac
;
273 u_int32_t mac_lo
= 0, mac_hi
= 0;
275 sitara_cm_reg_read_4(OMAP2SCM_MAC_ID0_LO
, &mac_lo
);
276 sitara_cm_reg_read_4(OMAP2SCM_MAC_ID0_HI
, &mac_hi
);
278 if ((mac_lo
== 0) && (mac_hi
== 0))
279 printf("%s: invalid ethernet address\n", DEVNAME(sc
));
281 ac
->ac_enaddr
[0] = (mac_hi
>> 0) & 0xff;
282 ac
->ac_enaddr
[1] = (mac_hi
>> 8) & 0xff;
283 ac
->ac_enaddr
[2] = (mac_hi
>> 16) & 0xff;
284 ac
->ac_enaddr
[3] = (mac_hi
>> 24) & 0xff;
285 ac
->ac_enaddr
[4] = (mac_lo
>> 0) & 0xff;
286 ac
->ac_enaddr
[5] = (mac_lo
>> 8) & 0xff;
291 cpsw_mdio_init(struct cpsw_softc
*sc
)
293 uint32_t alive
, link
;
296 sc
->sc_active_port
= 0;
298 /* Initialze MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
299 /* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
300 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, MDIOCONTROL
,
301 (1<<30) | (1<<18) | 0xFF);
303 for(tries
= 0; tries
< 1000; tries
++) {
304 alive
= bus_space_read_4(sc
->sc_bst
, sc
->sc_bsh
, MDIOALIVE
) & 3;
311 printf("%s: no PHY is alive\n", DEVNAME(sc
));
315 link
= bus_space_read_4(sc
->sc_bst
, sc
->sc_bsh
, MDIOLINK
) & 3;
318 /* both ports are alive, prefer one with link */
320 sc
->sc_active_port
= 1;
321 } else if (alive
== 2)
322 sc
->sc_active_port
= 1;
324 /* Select the port to monitor */
325 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, MDIOUSERPHYSEL0
,
330 cpsw_attach(struct device
*parent
, struct device
*self
, void *aux
)
332 struct cpsw_softc
*sc
= (struct cpsw_softc
*)self
;
333 struct armv7_attach_args
*aa
= aux
;
334 struct arpcom
* const ac
= &sc
->sc_ac
;
335 struct ifnet
* const ifp
= &ac
->ac_if
;
340 timeout_set(&sc
->sc_tick
, cpsw_tick
, sc
);
342 cpsw_get_mac_addr(sc
);
344 sc
->sc_rxthih
= arm_intr_establish(aa
->aa_dev
->irq
[0] +
345 CPSW_INTROFF_RXTH
, IPL_NET
, cpsw_rxthintr
, sc
, DEVNAME(sc
));
346 sc
->sc_rxih
= arm_intr_establish(aa
->aa_dev
->irq
[0] +
347 CPSW_INTROFF_RX
, IPL_NET
, cpsw_rxintr
, sc
, DEVNAME(sc
));
348 sc
->sc_txih
= arm_intr_establish(aa
->aa_dev
->irq
[0] +
349 CPSW_INTROFF_TX
, IPL_NET
, cpsw_txintr
, sc
, DEVNAME(sc
));
350 sc
->sc_miscih
= arm_intr_establish(aa
->aa_dev
->irq
[0] +
351 CPSW_INTROFF_MISC
, IPL_NET
, cpsw_miscintr
, sc
, DEVNAME(sc
));
353 sc
->sc_bst
= aa
->aa_iot
;
354 sc
->sc_bdt
= aa
->aa_dmat
;
356 error
= bus_space_map(sc
->sc_bst
, aa
->aa_dev
->mem
[0].addr
,
357 aa
->aa_dev
->mem
[0].size
, 0, &sc
->sc_bsh
);
359 printf("can't map registers: %d\n", error
);
363 sc
->sc_txdescs_pa
= aa
->aa_dev
->mem
[0].addr
+
364 CPSW_CPPI_RAM_TXDESCS_BASE
;
365 error
= bus_space_subregion(sc
->sc_bst
, sc
->sc_bsh
,
366 CPSW_CPPI_RAM_TXDESCS_BASE
, CPSW_CPPI_RAM_TXDESCS_SIZE
,
367 &sc
->sc_bsh_txdescs
);
369 printf("can't subregion tx ring SRAM: %d\n", error
);
373 sc
->sc_rxdescs_pa
= aa
->aa_dev
->mem
[0].addr
+
374 CPSW_CPPI_RAM_RXDESCS_BASE
;
375 error
= bus_space_subregion(sc
->sc_bst
, sc
->sc_bsh
,
376 CPSW_CPPI_RAM_RXDESCS_BASE
, CPSW_CPPI_RAM_RXDESCS_SIZE
,
377 &sc
->sc_bsh_rxdescs
);
379 printf("can't subregion rx ring SRAM: %d\n", error
);
383 sc
->sc_rdp
= malloc(sizeof(*sc
->sc_rdp
), M_TEMP
, M_WAITOK
);
384 KASSERT(sc
->sc_rdp
!= NULL
);
386 for (i
= 0; i
< CPSW_NTXDESCS
; i
++) {
387 if ((error
= bus_dmamap_create(sc
->sc_bdt
, MCLBYTES
,
388 CPSW_TXFRAGS
, MCLBYTES
, 0, 0,
389 &sc
->sc_rdp
->tx_dm
[i
])) != 0) {
390 printf("unable to create tx DMA map: %d\n", error
);
392 sc
->sc_rdp
->tx_mb
[i
] = NULL
;
395 for (i
= 0; i
< CPSW_NRXDESCS
; i
++) {
396 if ((error
= bus_dmamap_create(sc
->sc_bdt
, MCLBYTES
, 1,
397 MCLBYTES
, 0, 0, &sc
->sc_rdp
->rx_dm
[i
])) != 0) {
398 printf("unable to create rx DMA map: %d\n", error
);
400 sc
->sc_rdp
->rx_mb
[i
] = NULL
;
403 sc
->sc_txpad
= dma_alloc(ETHER_MIN_LEN
, PR_WAITOK
| PR_ZERO
);
404 KASSERT(sc
->sc_txpad
!= NULL
);
405 bus_dmamap_create(sc
->sc_bdt
, ETHER_MIN_LEN
, 1, ETHER_MIN_LEN
, 0,
406 BUS_DMA_WAITOK
, &sc
->sc_txpad_dm
);
407 bus_dmamap_load(sc
->sc_bdt
, sc
->sc_txpad_dm
, sc
->sc_txpad
,
408 ETHER_MIN_LEN
, NULL
, BUS_DMA_WAITOK
|BUS_DMA_WRITE
);
409 bus_dmamap_sync(sc
->sc_bdt
, sc
->sc_txpad_dm
, 0, ETHER_MIN_LEN
,
410 BUS_DMASYNC_PREWRITE
);
412 idver
= bus_space_read_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_SS_IDVER
);
413 printf(": version %d.%d (%d), address %s\n",
414 CPSW_SS_IDVER_MAJ(idver
), CPSW_SS_IDVER_MIN(idver
),
415 CPSW_SS_IDVER_RTL(idver
), ether_sprintf(ac
->ac_enaddr
));
418 ifp
->if_capabilities
= 0;
419 ifp
->if_flags
= IFF_BROADCAST
| IFF_SIMPLEX
| IFF_MULTICAST
;
420 ifp
->if_start
= cpsw_start
;
421 ifp
->if_ioctl
= cpsw_ioctl
;
422 ifp
->if_watchdog
= cpsw_watchdog
;
423 IFQ_SET_MAXLEN(&ifp
->if_snd
, CPSW_NTXDESCS
- 1);
424 memcpy(ifp
->if_xname
, DEVNAME(sc
), IFNAMSIZ
);
428 sc
->sc_mii
.mii_ifp
= ifp
;
429 sc
->sc_mii
.mii_readreg
= cpsw_mii_readreg
;
430 sc
->sc_mii
.mii_writereg
= cpsw_mii_writereg
;
431 sc
->sc_mii
.mii_statchg
= cpsw_mii_statchg
;
435 ifmedia_init(&sc
->sc_mii
.mii_media
, 0, cpsw_mediachange
,
437 mii_attach(self
, &sc
->sc_mii
, 0xffffffff,
438 MII_PHY_ANY
, MII_OFFSET_ANY
, 0);
439 if (LIST_FIRST(&sc
->sc_mii
.mii_phys
) == NULL
) {
440 printf("no PHY found!\n");
441 ifmedia_add(&sc
->sc_mii
.mii_media
,
442 IFM_ETHER
|IFM_MANUAL
, 0, NULL
);
443 ifmedia_set(&sc
->sc_mii
.mii_media
, IFM_ETHER
|IFM_MANUAL
);
445 ifmedia_set(&sc
->sc_mii
.mii_media
, IFM_ETHER
|IFM_AUTO
);
455 cpsw_mediachange(struct ifnet
*ifp
)
457 struct cpsw_softc
*sc
= ifp
->if_softc
;
459 if (LIST_FIRST(&sc
->sc_mii
.mii_phys
))
460 mii_mediachg(&sc
->sc_mii
);
466 cpsw_mediastatus(struct ifnet
*ifp
, struct ifmediareq
*ifmr
)
468 struct cpsw_softc
*sc
= ifp
->if_softc
;
470 if (LIST_FIRST(&sc
->sc_mii
.mii_phys
)) {
471 mii_pollstat(&sc
->sc_mii
);
472 ifmr
->ifm_active
= sc
->sc_mii
.mii_media_active
;
473 ifmr
->ifm_status
= sc
->sc_mii
.mii_media_status
;
478 cpsw_start(struct ifnet
*ifp
)
480 struct cpsw_softc
* const sc
= ifp
->if_softc
;
481 struct cpsw_ring_data
* const rdp
= sc
->sc_rdp
;
482 struct cpsw_cpdma_bd bd
;
493 if (!ISSET(ifp
->if_flags
, IFF_RUNNING
) ||
494 ifq_is_oactive(&ifp
->if_snd
) ||
495 IFQ_IS_EMPTY(&ifp
->if_snd
))
498 if (sc
->sc_txnext
>= sc
->sc_txhead
)
499 txfree
= CPSW_NTXDESCS
- 1 + sc
->sc_txhead
- sc
->sc_txnext
;
501 txfree
= sc
->sc_txhead
- sc
->sc_txnext
- 1;
504 if (txfree
<= CPSW_TXFRAGS
) {
505 ifq_set_oactive(&ifp
->if_snd
);
509 IFQ_DEQUEUE(&ifp
->if_snd
, m
);
513 dm
= rdp
->tx_dm
[sc
->sc_txnext
];
514 error
= bus_dmamap_load_mbuf(sc
->sc_bdt
, dm
, m
, BUS_DMA_NOWAIT
);
519 case EFBIG
: /* mbuf chain is too fragmented */
520 if (m_defrag(m
, M_DONTWAIT
) == 0 &&
521 bus_dmamap_load_mbuf(sc
->sc_bdt
, dm
, m
,
522 BUS_DMA_NOWAIT
) == 0)
532 mlen
= dm
->dm_mapsize
;
533 pad
= mlen
< CPSW_PAD_LEN
;
535 KASSERT(rdp
->tx_mb
[sc
->sc_txnext
] == NULL
);
536 rdp
->tx_mb
[sc
->sc_txnext
] = m
;
540 bpf_mtap(ifp
->if_bpf
, m
, BPF_DIRECTION_OUT
);
543 bus_dmamap_sync(sc
->sc_bdt
, dm
, 0, dm
->dm_mapsize
,
544 BUS_DMASYNC_PREWRITE
);
547 txstart
= sc
->sc_txnext
;
548 eopi
= sc
->sc_txnext
;
549 for (seg
= 0; seg
< dm
->dm_nsegs
; seg
++) {
550 bd
.next
= cpsw_txdesc_paddr(sc
,
551 TXDESC_NEXT(sc
->sc_txnext
));
552 bd
.bufptr
= dm
->dm_segs
[seg
].ds_addr
;
554 bd
.buflen
= dm
->dm_segs
[seg
].ds_len
;
559 bd
.flags
= CPDMA_BD_OWNER
| CPDMA_BD_SOP
;
560 bd
.pktlen
= MAX(mlen
, CPSW_PAD_LEN
);
563 if (seg
== dm
->dm_nsegs
- 1 && !pad
)
564 bd
.flags
|= CPDMA_BD_EOP
;
566 cpsw_set_txdesc(sc
, sc
->sc_txnext
, &bd
);
568 eopi
= sc
->sc_txnext
;
569 sc
->sc_txnext
= TXDESC_NEXT(sc
->sc_txnext
);
572 bd
.next
= cpsw_txdesc_paddr(sc
,
573 TXDESC_NEXT(sc
->sc_txnext
));
574 bd
.bufptr
= sc
->sc_txpad_pa
;
576 bd
.buflen
= CPSW_PAD_LEN
- mlen
;
578 bd
.flags
= CPDMA_BD_EOP
;
580 cpsw_set_txdesc(sc
, sc
->sc_txnext
, &bd
);
582 eopi
= sc
->sc_txnext
;
583 sc
->sc_txnext
= TXDESC_NEXT(sc
->sc_txnext
);
589 /* terminate the new chain */
590 KASSERT(eopi
== TXDESC_PREV(sc
->sc_txnext
));
591 cpsw_set_txdesc_next(sc
, TXDESC_PREV(sc
->sc_txnext
), 0);
593 /* link the new chain on */
594 cpsw_set_txdesc_next(sc
, TXDESC_PREV(txstart
),
595 cpsw_txdesc_paddr(sc
, txstart
));
597 /* kick the dma engine */
598 sc
->sc_txeoq
= false;
599 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_TX_HDP(0),
600 cpsw_txdesc_paddr(sc
, txstart
));
606 cpsw_ioctl(struct ifnet
*ifp
, u_long cmd
, caddr_t data
)
608 struct cpsw_softc
*sc
= ifp
->if_softc
;
609 struct ifreq
*ifr
= (struct ifreq
*)data
;
615 ifp
->if_flags
|= IFF_UP
;
618 if (ifp
->if_flags
& IFF_UP
) {
619 if (ifp
->if_flags
& IFF_RUNNING
)
624 if (ifp
->if_flags
& IFF_RUNNING
)
629 ifr
->ifr_media
&= ~IFM_ETH_FMASK
;
632 error
= ifmedia_ioctl(ifp
, ifr
, &sc
->sc_mii
.mii_media
, cmd
);
635 error
= ether_ioctl(ifp
, &sc
->sc_ac
, cmd
, data
);
638 if (error
== ENETRESET
) {
639 if (ifp
->if_flags
& IFF_RUNNING
)
650 cpsw_watchdog(struct ifnet
*ifp
)
652 printf("%s: device timeout\n", ifp
->if_xname
);
660 cpsw_mii_wait(struct cpsw_softc
* const sc
, int reg
)
664 for(tries
= 0; tries
< 1000; tries
++) {
665 if ((bus_space_read_4(sc
->sc_bst
, sc
->sc_bsh
, reg
) & (1U << 31)) == 0)
673 cpsw_mii_readreg(struct device
*dev
, int phy
, int reg
)
675 struct cpsw_softc
* const sc
= (struct cpsw_softc
*)dev
;
678 if (cpsw_mii_wait(sc
, MDIOUSERACCESS0
) != 0)
681 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, MDIOUSERACCESS0
, (1U << 31) |
682 ((reg
& 0x1F) << 21) | ((phy
& 0x1F) << 16));
684 if (cpsw_mii_wait(sc
, MDIOUSERACCESS0
) != 0)
687 v
= bus_space_read_4(sc
->sc_bst
, sc
->sc_bsh
, MDIOUSERACCESS0
);
695 cpsw_mii_writereg(struct device
*dev
, int phy
, int reg
, int val
)
697 struct cpsw_softc
* const sc
= (struct cpsw_softc
*)dev
;
700 KASSERT((val
& 0xffff0000UL
) == 0);
702 if (cpsw_mii_wait(sc
, MDIOUSERACCESS0
) != 0)
705 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, MDIOUSERACCESS0
, (1U << 31) | (1 << 30) |
706 ((reg
& 0x1F) << 21) | ((phy
& 0x1F) << 16) | val
);
708 if (cpsw_mii_wait(sc
, MDIOUSERACCESS0
) != 0)
711 v
= bus_space_read_4(sc
->sc_bst
, sc
->sc_bsh
, MDIOUSERACCESS0
);
712 if ((v
& (1 << 29)) == 0)
714 printf("%s error\n", __func__
);
/*
 * MII status-change callback.  Intentionally empty: the MAC is
 * configured statically in cpsw_init and does not react to PHY
 * speed/duplex changes here.
 */
void
cpsw_mii_statchg(struct device *self)
{
}
725 cpsw_new_rxbuf(struct cpsw_softc
* const sc
, const u_int i
)
727 struct cpsw_ring_data
* const rdp
= sc
->sc_rdp
;
728 const u_int h
= RXDESC_PREV(i
);
729 struct cpsw_cpdma_bd bd
;
733 MGETHDR(m
, M_DONTWAIT
, MT_DATA
);
738 MCLGET(m
, M_DONTWAIT
);
739 if ((m
->m_flags
& M_EXT
) == 0) {
744 /* We have a new buffer, prepare it for the ring. */
746 if (rdp
->rx_mb
[i
] != NULL
)
747 bus_dmamap_unload(sc
->sc_bdt
, rdp
->rx_dm
[i
]);
749 m
->m_len
= m
->m_pkthdr
.len
= MCLBYTES
;
753 error
= bus_dmamap_load_mbuf(sc
->sc_bdt
, rdp
->rx_dm
[i
], rdp
->rx_mb
[i
],
754 BUS_DMA_READ
|BUS_DMA_NOWAIT
);
756 printf("can't load rx DMA map %d: %d\n", i
, error
);
759 bus_dmamap_sync(sc
->sc_bdt
, rdp
->rx_dm
[i
],
760 0, rdp
->rx_dm
[i
]->dm_mapsize
, BUS_DMASYNC_PREREAD
);
765 /* (re-)setup the descriptor */
767 bd
.bufptr
= rdp
->rx_dm
[i
]->dm_segs
[0].ds_addr
;
769 bd
.buflen
= MIN(0x7ff, rdp
->rx_dm
[i
]->dm_segs
[0].ds_len
);
771 bd
.flags
= CPDMA_BD_OWNER
;
773 cpsw_set_rxdesc(sc
, i
, &bd
);
774 /* and link onto ring */
775 cpsw_set_rxdesc_next(sc
, h
, cpsw_rxdesc_paddr(sc
, i
));
781 cpsw_init(struct ifnet
*ifp
)
783 struct cpsw_softc
* const sc
= ifp
->if_softc
;
784 struct arpcom
*ac
= &sc
->sc_ac
;
785 struct mii_data
* const mii
= &sc
->sc_mii
;
794 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_WR_SOFT_RESET
, 1);
795 while(bus_space_read_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_WR_SOFT_RESET
) & 1);
798 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_SS_SOFT_RESET
, 1);
799 while(bus_space_read_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_SS_SOFT_RESET
) & 1);
801 /* Clear table (30) and enable ALE(31) and set passthrough (4) */
802 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_ALE_CONTROL
, (3 << 30) | 0x10);
804 /* Reset and init Sliver port 1 and 2 */
805 for (i
= 0; i
< 2; i
++) {
807 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_SL_SOFT_RESET(i
), 1);
808 while(bus_space_read_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_SL_SOFT_RESET(i
)) & 1);
809 /* Set Slave Mapping */
810 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_SL_RX_PRI_MAP(i
), 0x76543210);
811 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_PORT_P_TX_PRI_MAP(i
+1), 0x33221100);
812 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_SL_RX_MAXLEN(i
), 0x5f2);
813 /* Set MAC Address */
814 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_PORT_P_SA_HI(i
+1),
815 ac
->ac_enaddr
[0] | (ac
->ac_enaddr
[1] << 8) |
816 (ac
->ac_enaddr
[2] << 16) | (ac
->ac_enaddr
[3] << 24));
817 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_PORT_P_SA_LO(i
+1),
818 ac
->ac_enaddr
[4] | (ac
->ac_enaddr
[5] << 8));
820 /* Set MACCONTROL for ports 0,1: FULLDUPLEX(0), GMII_EN(5),
821 IFCTL_A(15), IFCTL_B(16) FIXME */
822 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_SL_MACCONTROL(i
),
823 1 | (1<<5) | (1<<15) | (1<<16));
825 /* Set ALE port to forwarding(3) on the active port */
826 if (i
== sc
->sc_active_port
)
827 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_ALE_PORTCTL(i
+1), 3);
830 /* Set Host Port Mapping */
831 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_PORT_P0_CPDMA_TX_PRI_MAP
, 0x76543210);
832 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_PORT_P0_CPDMA_RX_CH_MAP
, 0);
834 /* Set ALE port to forwarding(3) */
835 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_ALE_PORTCTL(0), 3);
837 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_SS_PTYPE
, 0);
838 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_SS_STAT_PORT_EN
, 7);
840 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_SOFT_RESET
, 1);
841 while(bus_space_read_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_SOFT_RESET
) & 1);
843 for (i
= 0; i
< 8; i
++) {
844 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_TX_HDP(i
), 0);
845 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_RX_HDP(i
), 0);
846 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_TX_CP(i
), 0);
847 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_RX_CP(i
), 0);
850 bus_space_set_region_4(sc
->sc_bst
, sc
->sc_bsh_txdescs
, 0, 0,
851 CPSW_CPPI_RAM_TXDESCS_SIZE
/4);
856 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_RX_FREEBUFFER(0), 0);
858 bus_space_set_region_4(sc
->sc_bst
, sc
->sc_bsh_rxdescs
, 0, 0,
859 CPSW_CPPI_RAM_RXDESCS_SIZE
/4);
861 /* Initialize RX Buffer Descriptors */
862 cpsw_set_rxdesc_next(sc
, RXDESC_PREV(0), 0);
863 for (i
= 0; i
< CPSW_NRXDESCS
; i
++) {
864 cpsw_new_rxbuf(sc
, i
);
868 /* align layer 3 header to 32-bit */
869 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_RX_BUFFER_OFFSET
, ETHER_ALIGN
);
871 /* Clear all interrupt Masks */
872 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_RX_INTMASK_CLEAR
, 0xFFFFFFFF);
873 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_TX_INTMASK_CLEAR
, 0xFFFFFFFF);
875 /* Enable TX & RX DMA */
876 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_TX_CONTROL
, 1);
877 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_RX_CONTROL
, 1);
879 /* Enable interrupt pacing for C0 RX/TX (IMAX set to max intr/ms allowed) */
880 #define CPSW_VBUSP_CLK_MHZ 2400 /* hardcoded for BBB */
881 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_WR_C_RX_IMAX(0), 2);
882 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_WR_C_TX_IMAX(0), 2);
883 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_WR_INT_CONTROL
, 3 << 16 | CPSW_VBUSP_CLK_MHZ
/4);
885 /* Enable TX and RX interrupt receive for core 0 */
886 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_WR_C_TX_EN(0), 1);
887 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_WR_C_RX_EN(0), 1);
888 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_WR_C_MISC_EN(0), 0x1F);
890 /* Enable host Error Interrupt */
891 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_DMA_INTMASK_SET
, 2);
893 /* Enable interrupts for TX and RX Channel 0 */
894 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_TX_INTMASK_SET
, 1);
895 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_RX_INTMASK_SET
, 1);
897 /* Ack stalled irqs */
898 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_CPDMA_EOI_VECTOR
, CPSW_INTROFF_RXTH
);
899 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_CPDMA_EOI_VECTOR
, CPSW_INTROFF_RX
);
900 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_CPDMA_EOI_VECTOR
, CPSW_INTROFF_TX
);
901 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_CPDMA_EOI_VECTOR
, CPSW_INTROFF_MISC
);
907 /* Write channel 0 RX HDP */
908 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_RX_HDP(0), cpsw_rxdesc_paddr(sc
, 0));
910 sc
->sc_rxeoq
= false;
915 ifp
->if_flags
|= IFF_RUNNING
;
916 ifq_clr_oactive(&ifp
->if_snd
);
918 timeout_add_sec(&sc
->sc_tick
, 1);
924 cpsw_stop(struct ifnet
*ifp
)
926 struct cpsw_softc
* const sc
= ifp
->if_softc
;
927 struct cpsw_ring_data
* const rdp
= sc
->sc_rdp
;
931 /* XXX find where disable comes from */
932 printf("%s: ifp %p disable %d\n", __func__
, ifp
, disable
);
934 if ((ifp
->if_flags
& IFF_RUNNING
) == 0)
937 timeout_del(&sc
->sc_tick
);
939 mii_down(&sc
->sc_mii
);
941 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_TX_INTMASK_CLEAR
, 1);
942 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_RX_INTMASK_CLEAR
, 1);
943 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_WR_C_TX_EN(0), 0x0);
944 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_WR_C_RX_EN(0), 0x0);
945 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_WR_C_MISC_EN(0), 0x1F);
947 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_TX_TEARDOWN
, 0);
948 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_RX_TEARDOWN
, 0);
950 while ((sc
->sc_txrun
|| sc
->sc_rxrun
) && i
< 10000) {
952 if ((sc
->sc_txrun
== true) && cpsw_txintr(sc
) == 0)
953 sc
->sc_txrun
= false;
954 if ((sc
->sc_rxrun
== true) && cpsw_rxintr(sc
) == 0)
955 sc
->sc_rxrun
= false;
958 /* printf("%s toredown complete in %u\n", __func__, i); */
961 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_WR_SOFT_RESET
, 1);
962 while(bus_space_read_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_WR_SOFT_RESET
) & 1);
965 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_SS_SOFT_RESET
, 1);
966 while(bus_space_read_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_SS_SOFT_RESET
) & 1);
968 for (i
= 0; i
< 2; i
++) {
969 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_SL_SOFT_RESET(i
), 1);
970 while(bus_space_read_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_SL_SOFT_RESET(i
)) & 1);
974 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_SOFT_RESET
, 1);
975 while(bus_space_read_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_SOFT_RESET
) & 1);
977 /* Release any queued transmit buffers. */
978 for (i
= 0; i
< CPSW_NTXDESCS
; i
++) {
979 bus_dmamap_unload(sc
->sc_bdt
, rdp
->tx_dm
[i
]);
980 m_freem(rdp
->tx_mb
[i
]);
981 rdp
->tx_mb
[i
] = NULL
;
984 ifp
->if_flags
&= ~IFF_RUNNING
;
986 ifq_clr_oactive(&ifp
->if_snd
);
988 /* XXX Not sure what this is doing calling disable here
989 where is disable set?
996 for (i
= 0; i
< CPSW_NRXDESCS
; i
++) {
997 bus_dmamap_unload(sc
->sc_bdt
, rdp
->rx_dm
[i
]);
998 m_freem(rdp
->rx_mb
[i
]);
999 rdp
->rx_mb
[i
] = NULL
;
1004 cpsw_rxthintr(void *arg
)
1006 struct cpsw_softc
* const sc
= arg
;
1008 /* this won't deassert the interrupt though */
1009 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_CPDMA_EOI_VECTOR
, CPSW_INTROFF_RXTH
);
1015 cpsw_rxintr(void *arg
)
1017 struct cpsw_softc
* const sc
= arg
;
1018 struct ifnet
* const ifp
= &sc
->sc_ac
.ac_if
;
1019 struct cpsw_ring_data
* const rdp
= sc
->sc_rdp
;
1020 struct cpsw_cpdma_bd bd
;
1022 struct mbuf_list ml
= MBUF_LIST_INITIALIZER();
1027 sc
->sc_rxeoq
= false;
1030 KASSERT(sc
->sc_rxhead
< CPSW_NRXDESCS
);
1036 KASSERT(dm
!= NULL
);
1039 cpsw_get_rxdesc(sc
, i
, &bd
);
1041 if (bd
.flags
& CPDMA_BD_OWNER
)
1044 if (bd
.flags
& CPDMA_BD_TDOWNCMPLT
) {
1045 sc
->sc_rxrun
= false;
1049 bus_dmamap_sync(sc
->sc_bdt
, dm
, 0, dm
->dm_mapsize
,
1050 BUS_DMASYNC_POSTREAD
);
1052 if (cpsw_new_rxbuf(sc
, i
) != 0) {
1053 /* drop current packet, reuse buffer for new */
1058 if ((bd
.flags
& (CPDMA_BD_SOP
|CPDMA_BD_EOP
)) !=
1059 (CPDMA_BD_SOP
|CPDMA_BD_EOP
)) {
1060 if (bd
.flags
& CPDMA_BD_SOP
) {
1061 printf("cpsw: rx packet too large\n");
1071 if (bd
.flags
& CPDMA_BD_PASSCRC
)
1072 len
-= ETHER_CRC_LEN
;
1074 m
->m_pkthdr
.len
= m
->m_len
= len
;
1080 sc
->sc_rxhead
= RXDESC_NEXT(sc
->sc_rxhead
);
1081 if (bd
.flags
& CPDMA_BD_EOQ
) {
1082 sc
->sc_rxeoq
= true;
1083 sc
->sc_rxrun
= false;
1085 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_RX_CP(0),
1086 cpsw_rxdesc_paddr(sc
, i
));
1090 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_RX_HDP(0),
1091 cpsw_rxdesc_paddr(sc
, sc
->sc_rxhead
));
1092 sc
->sc_rxrun
= true;
1093 sc
->sc_rxeoq
= false;
1096 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_CPDMA_EOI_VECTOR
,
1106 cpsw_tick(void *arg
)
1108 struct cpsw_softc
*sc
= arg
;
1112 mii_tick(&sc
->sc_mii
);
1115 timeout_add_sec(&sc
->sc_tick
, 1);
1119 cpsw_txintr(void *arg
)
1121 struct cpsw_softc
* const sc
= arg
;
1122 struct ifnet
* const ifp
= &sc
->sc_ac
.ac_if
;
1123 struct cpsw_ring_data
* const rdp
= sc
->sc_rdp
;
1124 struct cpsw_cpdma_bd bd
;
1125 bool handled
= false;
1129 KASSERT(sc
->sc_txrun
);
1131 tx0_cp
= bus_space_read_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_TX_CP(0));
1133 if (tx0_cp
== 0xfffffffc) {
1134 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_TX_CP(0), 0xfffffffc);
1135 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_TX_HDP(0), 0);
1136 sc
->sc_txrun
= false;
1141 tx0_cp
= bus_space_read_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_TX_CP(0));
1142 cpi
= (tx0_cp
- sc
->sc_txdescs_pa
) /
1143 sizeof(struct cpsw_cpdma_bd
);
1144 KASSERT(sc
->sc_txhead
< CPSW_NTXDESCS
);
1146 cpsw_get_txdesc(sc
, sc
->sc_txhead
, &bd
);
1148 if (bd
.buflen
== 0) {
1152 if ((bd
.flags
& CPDMA_BD_SOP
) == 0)
1155 if (bd
.flags
& CPDMA_BD_OWNER
) {
1156 printf("pwned %x %x %x\n", cpi
, sc
->sc_txhead
,
1161 if (bd
.flags
& CPDMA_BD_TDOWNCMPLT
) {
1162 sc
->sc_txrun
= false;
1166 bus_dmamap_sync(sc
->sc_bdt
, rdp
->tx_dm
[sc
->sc_txhead
],
1167 0, rdp
->tx_dm
[sc
->sc_txhead
]->dm_mapsize
,
1168 BUS_DMASYNC_POSTWRITE
);
1169 bus_dmamap_unload(sc
->sc_bdt
, rdp
->tx_dm
[sc
->sc_txhead
]);
1171 m_freem(rdp
->tx_mb
[sc
->sc_txhead
]);
1172 rdp
->tx_mb
[sc
->sc_txhead
] = NULL
;
1178 ifq_clr_oactive(&ifp
->if_snd
);
1181 if ((bd
.flags
& (CPDMA_BD_EOP
|CPDMA_BD_EOQ
)) ==
1182 (CPDMA_BD_EOP
|CPDMA_BD_EOQ
))
1183 sc
->sc_txeoq
= true;
1185 if (sc
->sc_txhead
== cpi
) {
1186 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_TX_CP(0),
1187 cpsw_txdesc_paddr(sc
, cpi
));
1188 sc
->sc_txhead
= TXDESC_NEXT(sc
->sc_txhead
);
1191 sc
->sc_txhead
= TXDESC_NEXT(sc
->sc_txhead
);
1192 if (sc
->sc_txeoq
== true)
1196 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_CPDMA_EOI_VECTOR
, CPSW_INTROFF_TX
);
1198 if ((sc
->sc_txnext
!= sc
->sc_txhead
) && sc
->sc_txeoq
) {
1199 if (bus_space_read_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_TX_HDP(0)) == 0) {
1200 sc
->sc_txeoq
= false;
1201 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_TX_HDP(0),
1202 cpsw_txdesc_paddr(sc
, sc
->sc_txhead
));
1206 if (handled
&& sc
->sc_txnext
== sc
->sc_txhead
)
1216 cpsw_miscintr(void *arg
)
1218 struct cpsw_softc
* const sc
= arg
;
1223 miscstat
= bus_space_read_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_WR_C_MISC_STAT(0));
1224 printf("%s %x FIRE\n", __func__
, miscstat
);
1226 if (miscstat
& CPSW_MISC_HOST_PEND
) {
1228 dmastat
= bus_space_read_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_DMA_INTSTAT_MASKED
);
1229 printf("CPSW_CPDMA_DMA_INTSTAT_MASKED %x\n", dmastat
);
1231 printf("rxhead %02x\n", sc
->sc_rxhead
);
1233 stat
= bus_space_read_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_DMASTATUS
);
1234 printf("CPSW_CPDMA_DMASTATUS %x\n", stat
);
1235 stat
= bus_space_read_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_TX_HDP(0));
1236 printf("CPSW_CPDMA_TX0_HDP %x\n", stat
);
1237 stat
= bus_space_read_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_TX_CP(0));
1238 printf("CPSW_CPDMA_TX0_CP %x\n", stat
);
1239 stat
= bus_space_read_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_RX_HDP(0));
1240 printf("CPSW_CPDMA_RX0_HDP %x\n", stat
);
1241 stat
= bus_space_read_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_RX_CP(0));
1242 printf("CPSW_CPDMA_RX0_CP %x\n", stat
);
1246 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_DMA_INTMASK_CLEAR
, dmastat
);
1247 dmastat
= bus_space_read_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_DMA_INTSTAT_MASKED
);
1248 printf("CPSW_CPDMA_DMA_INTSTAT_MASKED %x\n", dmastat
);
1251 bus_space_write_4(sc
->sc_bst
, sc
->sc_bsh
, CPSW_CPDMA_CPDMA_EOI_VECTOR
, CPSW_INTROFF_MISC
);