ioat: spatch the IOAT driver

for i in scripts/spatch/linux/*.cocci
do
	./scripts/spatch/spatch-me.sh $i yes kern/drivers/dma/ioat/
done

Signed-off-by: Barret Rhoden <brho@cs.berkeley.edu>
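The changes below are mechanical, driven by the Coccinelle rules in
scripts/spatch/linux/.  As a rough illustration only (these are not the
actual rule files, and rule names here are hypothetical), the scalar and
MMIO renames in this patch come from rules shaped like:

	// Sketch of the style of rule applied here; the real rules
	// live in scripts/spatch/linux/*.cocci.
	@@
	typedef u32;
	typedef uint32_t;
	@@
	-u32
	+uint32_t

	@@
	expression VAL, ADDR;
	@@
	-writel(VAL, ADDR)
	+write32(VAL, ADDR)

A single rule file can also be applied by hand with, e.g.,
spatch --sp-file scalar.cocci --in-place dma.c.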
diff --git a/kern/drivers/dma/ioat/dma.c b/kern/drivers/dma/ioat/dma.c
index f373a13..2554f37 100644
--- a/kern/drivers/dma/ioat/dma.c
+++ b/kern/drivers/dma/ioat/dma.c
@@ -71,7 +71,8 @@
 static void ioat_eh(struct ioatdma_chan *ioat_chan);
 
-static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
+static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan,
+				uint32_t chanerr)
 {
 	int i;
 
@@ -88,33 +89,33 @@
  * @irq: interrupt id
  * @data: interrupt data
  */
-irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
+void ioat_dma_do_interrupt(struct hw_trapframe *hw_tf, void *data)
 {
 	struct ioatdma_device *instance = data;
 	struct ioatdma_chan *ioat_chan;
 	unsigned long attnstatus;
 	int bit;
-	u8 intrctrl;
+	uint8_t intrctrl;
 
-	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
+	intrctrl = read8(instance->reg_base + IOAT_INTRCTRL_OFFSET);
 
 	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
-		return IRQ_NONE;
+		return;
 
 	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
-		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
-		return IRQ_NONE;
+		write8(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
+		return;
 	}
 
-	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
+	attnstatus = read32(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
 	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
 		ioat_chan = ioat_chan_by_index(instance, bit);
 		if (test_bit(IOAT_RUN, &ioat_chan->state))
 			tasklet_schedule(&ioat_chan->cleanup_task);
 	}
 
-	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
-	return IRQ_HANDLED;
+	write8(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
+	return;
 }
 
 /**
@@ -122,20 +123,20 @@
  * @irq: interrupt id
  * @data: interrupt data
  */
-irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
+void ioat_dma_do_interrupt_msix(struct hw_trapframe *hw_tf, void *data)
 {
 	struct ioatdma_chan *ioat_chan = data;
 
 	if (test_bit(IOAT_RUN, &ioat_chan->state))
 		tasklet_schedule(&ioat_chan->cleanup_task);
-	return IRQ_HANDLED;
+	return;
 }
 
 void ioat_stop(struct ioatdma_chan *ioat_chan)
 {
 	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
-	struct pci_dev *pdev = ioat_dma->pdev;
+	struct pci_device *pdev = ioat_dma->pdev;
 	int chan_id = chan_num(ioat_chan);
 	struct msix_entry *msix;
 
@@ -152,7 +153,7 @@
 		break;
 	case IOAT_MSI:
 	case IOAT_INTX:
-		synchronize_irq(pdev->irq);
+		synchronize_irq(pdev->irqline);
 		break;
 	default:
 		break;
@@ -172,7 +173,7 @@
 {
 	ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
 	ioat_chan->issued = ioat_chan->head;
-	writew(ioat_chan->dmacount,
+	write16(ioat_chan->dmacount,
 	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
 	dev_dbg(to_dev(ioat_chan),
 		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
@@ -185,9 +186,9 @@
 	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
 
 	if (ioat_ring_pending(ioat_chan)) {
-		spin_lock_bh(&ioat_chan->prep_lock);
+		spin_lock(&ioat_chan->prep_lock);
 		__ioat_issue_pending(ioat_chan);
-		spin_unlock_bh(&ioat_chan->prep_lock);
+		spin_unlock(&ioat_chan->prep_lock);
 	}
 }
 
@@ -240,10 +241,10 @@
 
 void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
 {
-	spin_lock_bh(&ioat_chan->prep_lock);
+	spin_lock(&ioat_chan->prep_lock);
 	if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
 		__ioat_start_null_desc(ioat_chan);
-	spin_unlock_bh(&ioat_chan->prep_lock);
+	spin_unlock(&ioat_chan->prep_lock);
 }
 
 static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
@@ -272,7 +273,7 @@
 {
 	unsigned long end = jiffies + tmo;
 	int err = 0;
-	u32 status;
+	uint32_t status;
 
 	status = ioat_chansts(ioat_chan);
 	if (is_ioat_active(status) || is_ioat_idle(status))
@@ -328,7 +329,7 @@
 	ioat_chan->head += ioat_chan->produce;
 
 	ioat_update_pending(ioat_chan);
-	spin_unlock_bh(&ioat_chan->prep_lock);
+	spin_unlock(&ioat_chan->prep_lock);
 
 	return cookie;
 }
@@ -341,13 +342,13 @@
 	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
 	int chunk;
 	dma_addr_t phys;
-	u8 *pos;
+	uint8_t *pos;
 	off_t offs;
 
 	chunk = idx / IOAT_DESCS_PER_2M;
 	idx &= (IOAT_DESCS_PER_2M - 1);
 	offs = idx * IOAT_DESC_SZ;
-	pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
+	pos = (uint8_t *)ioat_chan->descs[chunk].virt + offs;
 	phys = ioat_chan->descs[chunk].hw + offs;
 	hw = (struct ioat_dma_descriptor *)pos;
 	memset(hw, 0, sizeof(*hw));
@@ -378,7 +379,7 @@
 	int i, chunks;
 
 	/* allocate the array to hold the software ring */
-	ring = kcalloc(total_descs, sizeof(*ring), flags);
+	ring = kzmalloc((total_descs) * (sizeof(*ring)), flags);
 	if (!ring)
 		return NULL;
 
@@ -440,12 +441,12 @@
 
 	/* setup descriptor pre-fetching for v3.4 */
 	if (ioat_dma->cap & IOAT_CAP_DPS) {
-		u16 drsctl = IOAT_CHAN_DRSZ_2MB | IOAT_CHAN_DRS_EN;
+		uint16_t drsctl = IOAT_CHAN_DRSZ_2MB | IOAT_CHAN_DRS_EN;
 
 		if (chunks == 1)
 			drsctl |= IOAT_CHAN_DRS_AUTOWRAP;
 
-		writew(drsctl, ioat_chan->reg_base + IOAT_CHAN_DRSCTL_OFFSET);
+		write16(drsctl, ioat_chan->reg_base + IOAT_CHAN_DRSCTL_OFFSET);
 	}
 
@@ -460,7 +461,7 @@
 int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
 	__acquires(&ioat_chan->prep_lock)
 {
-	spin_lock_bh(&ioat_chan->prep_lock);
+	spin_lock(&ioat_chan->prep_lock);
 	/* never allow the last descriptor to be consumed, we need at
 	 * least one free at all times to allow for on-the-fly ring
 	 * resizing.
@@ -472,7 +473,7 @@
 		ioat_chan->produce = num_descs;
 		return 0;  /* with ioat->prep_lock held */
 	}
-	spin_unlock_bh(&ioat_chan->prep_lock);
+	spin_unlock(&ioat_chan->prep_lock);
 
 	dev_dbg_ratelimited(to_dev(ioat_chan),
 			    "%s: ring full! num_descs: %d (%x:%x:%x)\n",
@@ -523,10 +524,10 @@
 	kmem_cache_free(ioat_sed_cache, sed);
 }
 
-static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
+static uint64_t ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
 {
-	u64 phys_complete;
-	u64 completion;
+	uint64_t phys_complete;
+	uint64_t completion;
 
 	completion = *ioat_chan->completion;
 	phys_complete = ioat_chansts_to_addr(completion);
@@ -538,7 +539,7 @@
 }
 
 static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
-				   u64 *phys_complete)
+				   uint64_t *phys_complete)
 {
 	*phys_complete = ioat_get_current_completion(ioat_chan);
 	if (*phys_complete == ioat_chan->last_completion)
@@ -590,7 +591,7 @@
 	struct ioat_ring_ent *desc;
 	bool seen_current = false;
 	int idx = ioat_chan->tail, i;
-	u16 active;
+	uint16_t active;
 
 	dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
 		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
@@ -631,7 +632,7 @@
 
 		/* skip extended descriptors */
 		if (desc_has_ext(desc)) {
-			BUG_ON(i + 1 >= active);
+			assert(!(i + 1 >= active));
 			i++;
 		}
 
@@ -643,10 +644,10 @@
 	}
 
 	/* finish all descriptor reads before incrementing tail */
-	smp_mb();
+	mb();
 	ioat_chan->tail = idx + i;
 	/* no active descs have written a completion? */
-	BUG_ON(active && !seen_current);
+	assert(!(active && !seen_current));
 	ioat_chan->last_completion = phys_complete;
 
 	if (active - i == 0) {
@@ -657,8 +658,7 @@
 
 	/* microsecond delay by sysfs variable per pending descriptor */
 	if (ioat_chan->intr_coalesce != ioat_chan->prev_intr_coalesce) {
-		writew(min((ioat_chan->intr_coalesce * (active - i)),
-		       IOAT_INTRDELAY_MASK),
+		write16(MIN((ioat_chan->intr_coalesce * (active - i)), IOAT_INTRDELAY_MASK),
 		       ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
 		ioat_chan->prev_intr_coalesce = ioat_chan->intr_coalesce;
 	}
@@ -666,15 +666,15 @@
 
 static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
 {
-	u64 phys_complete;
+	uint64_t phys_complete;
 
-	spin_lock_bh(&ioat_chan->cleanup_lock);
+	spin_lock(&ioat_chan->cleanup_lock);
 
 	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
 		__cleanup(ioat_chan, phys_complete);
 
 	if (is_ioat_halted(*ioat_chan->completion)) {
-		u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+		uint32_t chanerr = read32(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
 
 		if (chanerr &
 		    (IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
@@ -683,7 +683,7 @@
 		}
 	}
 
-	spin_unlock_bh(&ioat_chan->cleanup_lock);
+	spin_unlock(&ioat_chan->cleanup_lock);
 }
 
 void ioat_cleanup_event(unsigned long data)
@@ -693,17 +693,17 @@
 	ioat_cleanup(ioat_chan);
 	if (!test_bit(IOAT_RUN, &ioat_chan->state))
 		return;
-	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
+	write16(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
 }
 
 static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
 {
-	u64 phys_complete;
+	uint64_t phys_complete;
 
 	/* set the completion address register again */
-	writel(lower_32_bits(ioat_chan->completion_dma),
+	write32(lower_32_bits(ioat_chan->completion_dma),
 	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
-	writel(upper_32_bits(ioat_chan->completion_dma),
+	write32(upper_32_bits(ioat_chan->completion_dma),
 	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
 
 	ioat_quiesce(ioat_chan, 0);
@@ -718,7 +718,7 @@
 {
 	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
 	struct ioat_ring_ent *desc;
-	u16 active;
+	uint16_t active;
 	int idx = ioat_chan->tail, i;
 
 	/*
@@ -749,7 +749,7 @@
 
 		/* skip extended descriptors */
 		if (desc_has_ext(desc)) {
-			WARN_ON(i + 1 >= active);
+			warn_on(i + 1 >= active);
 			i++;
 		}
 
@@ -760,7 +760,7 @@
 		}
 	}
 
-	smp_mb(); /* finish all descriptor reads before incrementing tail */
+	mb(); /* finish all descriptor reads before incrementing tail */
 	ioat_chan->tail = idx + active;
 
 	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
@@ -769,14 +769,14 @@
 
 static void ioat_eh(struct ioatdma_chan *ioat_chan)
 {
-	struct pci_dev *pdev = to_pdev(ioat_chan);
+	struct pci_device *pdev = to_pdev(ioat_chan);
 	struct ioat_dma_descriptor *hw;
 	struct dma_async_tx_descriptor *tx;
-	u64 phys_complete;
+	uint64_t phys_complete;
 	struct ioat_ring_ent *desc;
-	u32 err_handled = 0;
-	u32 chanerr_int;
-	u32 chanerr;
+	uint32_t err_handled = 0;
+	uint32_t chanerr_int;
+	uint32_t chanerr;
 	bool abort = false;
 	struct dmaengine_result res;
 
@@ -784,7 +784,7 @@
 	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
 		__cleanup(ioat_chan, phys_complete);
 
-	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+	chanerr = read32(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
 	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
 
 	dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
@@ -836,7 +836,7 @@
 		dev_err(to_dev(ioat_chan), "Errors not handled:\n");
 		ioat_print_chanerrs(ioat_chan, (chanerr & ~err_handled));
 
-		BUG();
+		panic("BUG");
 	}
 
 	/* cleanup the faulty descriptor since we are continuing */
@@ -852,7 +852,7 @@
 	/* mark faulting descriptor as complete */
 	*ioat_chan->completion = desc->txd.phys;
 
-	spin_lock_bh(&ioat_chan->prep_lock);
+	spin_lock(&ioat_chan->prep_lock);
 	/* we need abort all descriptors */
 	if (abort) {
 		ioat_abort_descs(ioat_chan);
@@ -860,11 +860,11 @@
 		ioat_reset_hw(ioat_chan);
 	}
 
-	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+	write32(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
 	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
 
 	ioat_restart_channel(ioat_chan);
-	spin_unlock_bh(&ioat_chan->prep_lock);
+	spin_unlock(&ioat_chan->prep_lock);
 }
 
 static void check_active(struct ioatdma_chan *ioat_chan)
@@ -882,7 +882,7 @@
 {
 	struct ioatdma_chan *ioat_chan = from_timer(ioat_chan, t, timer);
 	dma_addr_t phys_complete;
-	u64 status;
+	uint64_t status;
 
 	status = ioat_chansts(ioat_chan);
 
@@ -890,19 +890,19 @@
 	 * programming errors before advancing the completion state
 	 */
 	if (is_ioat_halted(status)) {
-		u32 chanerr;
+		uint32_t chanerr;
 
-		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+		chanerr = read32(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
 		dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
 			__func__, chanerr);
 		dev_err(to_dev(ioat_chan), "Errors:\n");
 		ioat_print_chanerrs(ioat_chan, chanerr);
 
 		if (test_bit(IOAT_RUN, &ioat_chan->state)) {
-			spin_lock_bh(&ioat_chan->cleanup_lock);
-			spin_lock_bh(&ioat_chan->prep_lock);
+			spin_lock(&ioat_chan->cleanup_lock);
+			spin_lock(&ioat_chan->prep_lock);
 			set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
-			spin_unlock_bh(&ioat_chan->prep_lock);
+			spin_unlock(&ioat_chan->prep_lock);
 
 			ioat_abort_descs(ioat_chan);
 			dev_warn(to_dev(ioat_chan), "Reset channel...\n");
@@ -910,23 +910,23 @@
 			dev_warn(to_dev(ioat_chan), "Restart channel...\n");
 			ioat_restart_channel(ioat_chan);
 
-			spin_lock_bh(&ioat_chan->prep_lock);
+			spin_lock(&ioat_chan->prep_lock);
 			clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
-			spin_unlock_bh(&ioat_chan->prep_lock);
-			spin_unlock_bh(&ioat_chan->cleanup_lock);
+			spin_unlock(&ioat_chan->prep_lock);
+			spin_unlock(&ioat_chan->cleanup_lock);
 		}
 
 		return;
 	}
 
-	spin_lock_bh(&ioat_chan->cleanup_lock);
+	spin_lock(&ioat_chan->cleanup_lock);
 
 	/* handle the no-actives case */
 	if (!ioat_ring_active(ioat_chan)) {
-		spin_lock_bh(&ioat_chan->prep_lock);
+		spin_lock(&ioat_chan->prep_lock);
 		check_active(ioat_chan);
-		spin_unlock_bh(&ioat_chan->prep_lock);
-		spin_unlock_bh(&ioat_chan->cleanup_lock);
+		spin_unlock(&ioat_chan->prep_lock);
+		spin_unlock(&ioat_chan->cleanup_lock);
 		return;
 	}
 
@@ -937,9 +937,9 @@
 	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
 		__cleanup(ioat_chan, phys_complete);
 	else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
-		u32 chanerr;
+		uint32_t chanerr;
 
-		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+		chanerr = read32(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
 		dev_err(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
 			status, chanerr);
 		dev_err(to_dev(ioat_chan), "Errors:\n");
@@ -948,9 +948,9 @@
 		dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
 			ioat_ring_active(ioat_chan));
 
-		spin_lock_bh(&ioat_chan->prep_lock);
+		spin_lock(&ioat_chan->prep_lock);
 		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
-		spin_unlock_bh(&ioat_chan->prep_lock);
+		spin_unlock(&ioat_chan->prep_lock);
 
 		ioat_abort_descs(ioat_chan);
 		dev_warn(to_dev(ioat_chan), "Resetting channel...\n");
@@ -958,16 +958,16 @@
 		dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
 		ioat_restart_channel(ioat_chan);
 
-		spin_lock_bh(&ioat_chan->prep_lock);
+		spin_lock(&ioat_chan->prep_lock);
 		clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
-		spin_unlock_bh(&ioat_chan->prep_lock);
-		spin_unlock_bh(&ioat_chan->cleanup_lock);
+		spin_unlock(&ioat_chan->prep_lock);
+		spin_unlock(&ioat_chan->cleanup_lock);
 		return;
 	} else
 		set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
 
 	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
-	spin_unlock_bh(&ioat_chan->cleanup_lock);
+	spin_unlock(&ioat_chan->cleanup_lock);
 }
 
 enum dma_status
@@ -992,22 +992,22 @@
 	 * initialized, with ioat3 specific workarounds
 	 */
 	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
-	struct pci_dev *pdev = ioat_dma->pdev;
-	u32 chanerr;
-	u16 dev_id;
+	struct pci_device *pdev = ioat_dma->pdev;
+	uint32_t chanerr;
+	uint16_t dev_id;
 	int err;
 
 	ioat_quiesce(ioat_chan, msecs_to_jiffies(100));
 
-	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
-	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+	chanerr = read32(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+	write32(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
 
 	if (ioat_dma->version < IOAT_VER_3_3) {
 		/* clear any pending errors */
 		err = pci_read_config_dword(pdev,
 				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
 		if (err) {
-			dev_err(&pdev->dev,
+			dev_err(&pdev->linux_dev,
 				"channel error register unreachable\n");
 			return err;
 		}
@@ -1026,23 +1026,23 @@
 	}
 
 	if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
-		ioat_dma->msixtba0 = readq(ioat_dma->reg_base + 0x1000);
-		ioat_dma->msixdata0 = readq(ioat_dma->reg_base + 0x1008);
-		ioat_dma->msixpba = readq(ioat_dma->reg_base + 0x1800);
+		ioat_dma->msixtba0 = read64(ioat_dma->reg_base + 0x1000);
+		ioat_dma->msixdata0 = read64(ioat_dma->reg_base + 0x1008);
+		ioat_dma->msixpba = read64(ioat_dma->reg_base + 0x1800);
 	}
 
 	err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
 	if (!err) {
 		if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
-			writeq(ioat_dma->msixtba0, ioat_dma->reg_base + 0x1000);
-			writeq(ioat_dma->msixdata0, ioat_dma->reg_base + 0x1008);
-			writeq(ioat_dma->msixpba, ioat_dma->reg_base + 0x1800);
+			write64(ioat_dma->msixtba0, ioat_dma->reg_base + 0x1000);
+			write64(ioat_dma->msixdata0, ioat_dma->reg_base + 0x1008);
+			write64(ioat_dma->msixpba, ioat_dma->reg_base + 0x1800);
 		}
 	}
 
 	if (err)
-		dev_err(&pdev->dev, "Failed to reset: %d\n", err);
+		dev_err(&pdev->linux_dev, "Failed to reset: %d\n", err);
 
 	return err;
 }
diff --git a/kern/drivers/dma/ioat/dma.h b/kern/drivers/dma/ioat/dma.h
index aaafd0e..737f5c5 100644
--- a/kern/drivers/dma/ioat/dma.h
+++ b/kern/drivers/dma/ioat/dma.h
@@ -73,24 +73,24 @@
  * @cap: read DMA capabilities register
  */
 struct ioatdma_device {
-	struct pci_dev *pdev;
+	struct pci_device *pdev;
 	void __iomem *reg_base;
 	struct dma_pool *completion_pool;
 #define MAX_SED_POOLS	5
 	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
 	struct dma_device dma_dev;
-	u8 version;
+	uint8_t version;
 #define IOAT_MAX_CHANS 4
 	struct msix_entry msix_entries[IOAT_MAX_CHANS];
 	struct ioatdma_chan *idx[IOAT_MAX_CHANS];
 	struct dca_provider *dca;
 	enum ioat_irq_mode irq_mode;
-	u32 cap;
+	uint32_t cap;
 
 	/* shadow version for CB3.3 chan reset errata workaround */
-	u64 msixtba0;
-	u64 msixdata0;
-	u32 msixpba;
+	uint64_t msixtba0;
+	uint64_t msixdata0;
+	uint32_t msixpba;
 };
 
 struct ioat_descs {
@@ -116,7 +116,7 @@
 	#define RESET_DELAY msecs_to_jiffies(100)
 	struct ioatdma_device *ioat_dma;
 	dma_addr_t completion_dma;
-	u64 *completion;
+	uint64_t *completion;
 	struct tasklet_struct cleanup_task;
 	struct kobject kobj;
 
@@ -132,12 +132,12 @@
 	 * @prep_lock: serializes descriptor preparation (producers)
 	 */
 	size_t xfercap_log;
-	u16 head;
-	u16 issued;
-	u16 tail;
-	u16 dmacount;
-	u16 alloc_order;
-	u16 produce;
+	uint16_t head;
+	uint16_t issued;
+	uint16_t tail;
+	uint16_t dmacount;
+	uint16_t alloc_order;
+	uint16_t produce;
 	struct ioat_ring_ent **ring;
 	spinlock_t prep_lock;
 	struct ioat_descs descs[2];
@@ -247,43 +247,43 @@
 	return ioat_dma->idx[index];
 }
 
-static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan)
+static inline uint64_t ioat_chansts(struct ioatdma_chan *ioat_chan)
 {
-	return readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET);
+	return read64(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET);
 }
 
-static inline u64 ioat_chansts_to_addr(u64 status)
+static inline uint64_t ioat_chansts_to_addr(uint64_t status)
 {
 	return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
 }
 
-static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan)
+static inline uint32_t ioat_chanerr(struct ioatdma_chan *ioat_chan)
 {
-	return readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+	return read32(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
 }
 
 static inline void ioat_suspend(struct ioatdma_chan *ioat_chan)
 {
-	u8 ver = ioat_chan->ioat_dma->version;
+	uint8_t ver = ioat_chan->ioat_dma->version;
 
-	writeb(IOAT_CHANCMD_SUSPEND,
+	write8(IOAT_CHANCMD_SUSPEND,
 	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
 }
 
 static inline void ioat_reset(struct ioatdma_chan *ioat_chan)
 {
-	u8 ver = ioat_chan->ioat_dma->version;
+	uint8_t ver = ioat_chan->ioat_dma->version;
 
-	writeb(IOAT_CHANCMD_RESET,
+	write8(IOAT_CHANCMD_RESET,
 	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
 }
 
 static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan)
 {
-	u8 ver = ioat_chan->ioat_dma->version;
-	u8 cmd;
+	uint8_t ver = ioat_chan->ioat_dma->version;
+	uint8_t cmd;
 
-	cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+	cmd = read8(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
 	return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
 }
 
@@ -317,51 +317,51 @@
 #define IOAT_MAX_DESCS 65536
 #define IOAT_DESCS_PER_2M 32768
 
-static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
+static inline uint32_t ioat_ring_size(struct ioatdma_chan *ioat_chan)
 {
 	return 1 << ioat_chan->alloc_order;
 }
 
 /* count of descriptors in flight with the engine */
-static inline u16 ioat_ring_active(struct ioatdma_chan *ioat_chan)
+static inline uint16_t ioat_ring_active(struct ioatdma_chan *ioat_chan)
 {
 	return CIRC_CNT(ioat_chan->head, ioat_chan->tail,
 			ioat_ring_size(ioat_chan));
 }
 
 /* count of descriptors pending submission to hardware */
-static inline u16 ioat_ring_pending(struct ioatdma_chan *ioat_chan)
+static inline uint16_t ioat_ring_pending(struct ioatdma_chan *ioat_chan)
 {
 	return CIRC_CNT(ioat_chan->head, ioat_chan->issued,
 			ioat_ring_size(ioat_chan));
 }
 
-static inline u32 ioat_ring_space(struct ioatdma_chan *ioat_chan)
+static inline uint32_t ioat_ring_space(struct ioatdma_chan *ioat_chan)
 {
 	return ioat_ring_size(ioat_chan) - ioat_ring_active(ioat_chan);
 }
 
-static inline u16
+static inline uint16_t
 ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len)
 {
-	u16 num_descs = len >> ioat_chan->xfercap_log;
+	uint16_t num_descs = len >> ioat_chan->xfercap_log;
 
 	num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1));
 	return num_descs;
 }
 
 static inline struct ioat_ring_ent *
-ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx)
+ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, uint16_t idx)
 {
 	return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)];
 }
 
 static inline void
-ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
+ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, uint64_t addr)
 {
-	writel(addr & 0x00000000FFFFFFFF,
+	write32(addr & 0x00000000FFFFFFFF,
 	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
-	writel(addr >> 32,
+	write32(addr >> 32,
 	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
 }
 
@@ -395,8 +395,8 @@
 		enum sum_check_flags *result, unsigned long flags);
 
 /* IOAT Operation functions */
-irqreturn_t ioat_dma_do_interrupt(int irq, void *data);
-irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data);
+void ioat_dma_do_interrupt(struct hw_trapframe *hw_tf, void *data);
+void ioat_dma_do_interrupt_msix(struct hw_trapframe *hw_tf, void *data);
 struct ioat_ring_ent **
 ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags);
 void ioat_start_null_desc(struct ioatdma_chan *ioat_chan);
@@ -411,8 +411,9 @@
 void ioat_issue_pending(struct dma_chan *chan);
 
 /* IOAT Init functions */
-bool is_bwd_ioat(struct pci_dev *pdev);
-struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
+bool is_bwd_ioat(struct pci_device *pdev);
+struct dca_provider *ioat_dca_init(struct pci_device *pdev,
+				   void __iomem *iobase);
 void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
 void ioat_kobject_del(struct ioatdma_device *ioat_dma);
 int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
diff --git a/kern/drivers/dma/ioat/hw.h b/kern/drivers/dma/ioat/hw.h
index 781c94d..96a99da 100644
--- a/kern/drivers/dma/ioat/hw.h
+++ b/kern/drivers/dma/ioat/hw.h
@@ -76,7 +76,7 @@
 #define IOAT_VER_3_4	0x34	/* Version 3.4 */
 
-int system_has_dca_enabled(struct pci_dev *pdev);
+int system_has_dca_enabled(struct pci_device *pdev);
 
 #define IOAT_DESC_SZ	64
 
diff --git a/kern/drivers/dma/ioat/init.c b/kern/drivers/dma/ioat/init.c
index d41dc9a..d3a7382 100644
--- a/kern/drivers/dma/ioat/init.c
+++ b/kern/drivers/dma/ioat/init.c
@@ -126,8 +126,9 @@
 };
 MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
 
-static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
-static void ioat_remove(struct pci_dev *pdev);
+static int ioat_pci_probe(struct pci_device *pdev,
+			  const struct pci_device_id *id);
+static void ioat_remove(struct pci_device *pdev);
 static void ioat_init_channel(struct ioatdma_device *ioat_dma,
 			      struct ioatdma_chan *ioat_chan, int idx);
 
@@ -151,9 +152,9 @@
 struct kmem_cache *ioat_cache;
 struct kmem_cache *ioat_sed_cache;
 
-static bool is_jf_ioat(struct pci_dev *pdev)
+static bool is_jf_ioat(struct pci_device *pdev)
 {
-	switch (pdev->device) {
+	switch (pdev->dev_id) {
 	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
 	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
 	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
@@ -170,9 +171,9 @@
 	}
 }
 
-static bool is_snb_ioat(struct pci_dev *pdev)
+static bool is_snb_ioat(struct pci_device *pdev)
 {
-	switch (pdev->device) {
+	switch (pdev->dev_id) {
 	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
 	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
 	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
@@ -189,9 +190,9 @@
 	}
 }
 
-static bool is_ivb_ioat(struct pci_dev *pdev)
+static bool is_ivb_ioat(struct pci_device *pdev)
 {
-	switch (pdev->device) {
+	switch (pdev->dev_id) {
 	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
 	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
 	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
@@ -209,9 +210,9 @@
 }
 
-static bool is_hsw_ioat(struct pci_dev *pdev)
+static bool is_hsw_ioat(struct pci_device *pdev)
 {
-	switch (pdev->device) {
+	switch (pdev->dev_id) {
 	case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
 	case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
 	case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
@@ -229,9 +230,9 @@
 }
 
-static bool is_bdx_ioat(struct pci_dev *pdev)
+static bool is_bdx_ioat(struct pci_device *pdev)
 {
-	switch (pdev->device) {
+	switch (pdev->dev_id) {
 	case PCI_DEVICE_ID_INTEL_IOAT_BDX0:
 	case PCI_DEVICE_ID_INTEL_IOAT_BDX1:
 	case PCI_DEVICE_ID_INTEL_IOAT_BDX2:
@@ -248,20 +249,20 @@
 	}
 }
 
-static inline bool is_skx_ioat(struct pci_dev *pdev)
+static inline bool is_skx_ioat(struct pci_device *pdev)
 {
-	return (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SKX) ? true : false;
+	return (pdev->dev_id == PCI_DEVICE_ID_INTEL_IOAT_SKX) ? true : false;
 }
 
-static bool is_xeon_cb32(struct pci_dev *pdev)
+static bool is_xeon_cb32(struct pci_device *pdev)
 {
 	return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
 		is_hsw_ioat(pdev) || is_bdx_ioat(pdev) || is_skx_ioat(pdev);
 }
 
-bool is_bwd_ioat(struct pci_dev *pdev)
+bool is_bwd_ioat(struct pci_device *pdev)
 {
-	switch (pdev->device) {
+	switch (pdev->dev_id) {
 	case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
 	case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
 	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
@@ -277,9 +278,9 @@
 	}
 }
 
-static bool is_bwd_noraid(struct pci_dev *pdev)
+static bool is_bwd_noraid(struct pci_device *pdev)
 {
-	switch (pdev->device) {
+	switch (pdev->dev_id) {
 	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
 	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
 	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
@@ -312,10 +313,10 @@
 static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
 {
 	int i;
-	u8 *src;
-	u8 *dest;
+	uint8_t *src;
+	uint8_t *dest;
 	struct dma_device *dma = &ioat_dma->dma_dev;
-	struct device *dev = &ioat_dma->pdev->dev;
+	struct device *dev = &ioat_dma->pdev->linux_dev;
 	struct dma_chan *dma_chan;
 	struct dma_async_tx_descriptor *tx;
 	dma_addr_t dma_dest, dma_src;
@@ -325,10 +326,10 @@
 	unsigned long tmo;
 	unsigned long flags;
 
-	src = kzalloc(IOAT_TEST_SIZE, GFP_KERNEL);
+	src = kzmalloc(IOAT_TEST_SIZE, MEM_WAIT);
 	if (!src)
 		return -ENOMEM;
-	dest = kzalloc(IOAT_TEST_SIZE, GFP_KERNEL);
+	dest = kzmalloc(IOAT_TEST_SIZE, MEM_WAIT);
 	if (!dest) {
 		kfree(src);
 		return -ENOMEM;
 	}
@@ -336,7 +337,7 @@
 
 	/* Fill in src buffer */
 	for (i = 0; i < IOAT_TEST_SIZE; i++)
-		src[i] = (u8)i;
+		src[i] = (uint8_t)i;
 
 	/* Start copy, using first DMA channel */
 	dma_chan = container_of(dma->channels.next, struct dma_chan,
@@ -415,12 +416,12 @@
 int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma)
 {
 	struct ioatdma_chan *ioat_chan;
-	struct pci_dev *pdev = ioat_dma->pdev;
-	struct device *dev = &pdev->dev;
+	struct pci_device *pdev = ioat_dma->pdev;
+	struct device *dev = &pdev->linux_dev;
 	struct msix_entry *msix;
 	int i, j, msixcnt;
 	int err = -EINVAL;
-	u8 intrctrl = 0;
+	uint8_t intrctrl = 0;
 
 	if (!strcmp(ioat_interrupt_style, "msix"))
 		goto msix;
@@ -465,7 +466,7 @@
 	if (err)
 		goto intx;
 
-	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
+	err = devm_request_irq(dev, pdev->irqline, ioat_dma_do_interrupt, 0,
 			       "ioat-msi", ioat_dma);
 	if (err) {
 		pci_disable_msi(pdev);
@@ -475,7 +476,7 @@
 	goto done;
 
 intx:
-	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
+	err = devm_request_irq(dev, pdev->irqline, ioat_dma_do_interrupt,
 			       IRQF_SHARED, "ioat-intx", ioat_dma);
 	if (err)
 		goto err_no_irq;
@@ -485,12 +486,12 @@
 	if (is_bwd_ioat(pdev))
 		ioat_intr_quirk(ioat_dma);
 	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
-	writeb(intrctrl, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
+	write8(intrctrl, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
 	return 0;
 
 err_no_irq:
 	/* Disable all interrupt generation */
-	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
+	write8(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
 	ioat_dma->irq_mode = IOAT_NOIRQ;
 	dev_err(dev, "no usable interrupts\n");
 	return err;
@@ -499,18 +500,18 @@
 static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma)
 {
 	/* Disable all interrupt generation */
-	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
+	write8(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
 }
 
 static int ioat_probe(struct ioatdma_device *ioat_dma)
 {
 	int err = -ENODEV;
 	struct dma_device *dma = &ioat_dma->dma_dev;
-	struct pci_dev *pdev = ioat_dma->pdev;
-	struct device *dev = &pdev->dev;
+	struct pci_device *pdev = ioat_dma->pdev;
+	struct device *dev = &pdev->linux_dev;
 
 	ioat_dma->completion_pool = dma_pool_create("completion_pool", dev,
-						    sizeof(u64),
+						    sizeof(uint64_t),
 						    SMP_CACHE_BYTES,
 						    SMP_CACHE_BYTES);
@@ -522,7 +523,7 @@
 	ioat_enumerate_channels(ioat_dma);
 
 	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
-	dma->dev = &pdev->dev;
+	dma->dev = &pdev->linux_dev;
 
 	if (!dma->chancnt) {
 		dev_err(dev, "channel enumeration error\n");
@@ -581,33 +582,33 @@
 static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
 {
 	struct ioatdma_chan *ioat_chan;
-	struct device *dev = &ioat_dma->pdev->dev;
+	struct device *dev = &ioat_dma->pdev->linux_dev;
 	struct dma_device *dma = &ioat_dma->dma_dev;
-	u8 xfercap_log;
+	uint8_t xfercap_log;
 	int i;
 
 	INIT_LIST_HEAD(&dma->channels);
-	dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
+	dma->chancnt = read8(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
 	dma->chancnt &= 0x1f; /* bits [4:0] valid */
 	if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) {
 		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
 			 dma->chancnt, ARRAY_SIZE(ioat_dma->idx));
 		dma->chancnt = ARRAY_SIZE(ioat_dma->idx);
 	}
-	xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
+	xfercap_log = read8(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
 	xfercap_log &= 0x1f; /* bits [4:0] valid */
 	if (xfercap_log == 0)
 		return;
 	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
 
 	for (i = 0; i < dma->chancnt; i++) {
-		ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
+		ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), MEM_WAIT);
 		if (!ioat_chan)
 			break;
 
 		ioat_init_channel(ioat_dma, ioat_chan, i);
 		ioat_chan->xfercap_log = xfercap_log;
-		spin_lock_init(&ioat_chan->prep_lock);
+		spinlock_init_irqsave(&ioat_chan->prep_lock);
 		if (ioat_reset_hw(ioat_chan)) {
 			i = 0;
 			break;
 		}
@@ -640,11 +641,11 @@
 
 	/* Put LTR to idle */
 	if (ioat_dma->version >= IOAT_VER_3_4)
-		writeb(IOAT_CHAN_LTR_SWSEL_IDLE,
+		write8(IOAT_CHAN_LTR_SWSEL_IDLE,
 		       ioat_chan->reg_base + IOAT_CHAN_LTR_SWSEL_OFFSET);
 
-	spin_lock_bh(&ioat_chan->cleanup_lock);
-	spin_lock_bh(&ioat_chan->prep_lock);
+	spin_lock(&ioat_chan->cleanup_lock);
+	spin_lock(&ioat_chan->prep_lock);
 	descs = ioat_ring_space(ioat_chan);
 	dev_dbg(to_dev(ioat_chan), "freeing %d idle descriptors\n", descs);
 	for (i = 0; i < descs; i++) {
@@ -676,8 +677,8 @@
 	ioat_chan->alloc_order = 0;
 	dma_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
 		      ioat_chan->completion_dma);
-	spin_unlock_bh(&ioat_chan->prep_lock);
-	spin_unlock_bh(&ioat_chan->cleanup_lock);
+	spin_unlock(&ioat_chan->prep_lock);
+	spin_unlock(&ioat_chan->cleanup_lock);
 
 	ioat_chan->last_completion = 0;
 	ioat_chan->completion_dma = 0;
@@ -691,65 +692,65 @@
 {
 	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
 	struct ioat_ring_ent **ring;
-	u64 status;
+	uint64_t status;
 	int order;
 	int i = 0;
-	u32 chanerr;
+	uint32_t chanerr;
 
 	/* have we already been set up? */
 	if (ioat_chan->ring)
 		return 1 << ioat_chan->alloc_order;
 
 	/* Setup register to interrupt and write completion status on error */
-	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
+	write16(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
 
 	/* allocate a completion writeback area */
 	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
 	ioat_chan->completion =
 		dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool,
-				GFP_NOWAIT, &ioat_chan->completion_dma);
+				MEM_ATOMIC, &ioat_chan->completion_dma);
 	if (!ioat_chan->completion)
 		return -ENOMEM;
 
-	writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF,
+	write32(((uint64_t)ioat_chan->completion_dma) & 0x00000000FFFFFFFF,
 	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
-	writel(((u64)ioat_chan->completion_dma) >> 32,
+	write32(((uint64_t)ioat_chan->completion_dma) >> 32,
 	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
 
 	order = IOAT_MAX_ORDER;
-	ring = ioat_alloc_ring(c, order, GFP_NOWAIT);
+	ring = ioat_alloc_ring(c, order, MEM_ATOMIC);
 	if (!ring)
 		return -ENOMEM;
 
-	spin_lock_bh(&ioat_chan->cleanup_lock);
-	spin_lock_bh(&ioat_chan->prep_lock);
+	spin_lock(&ioat_chan->cleanup_lock);
+	spin_lock(&ioat_chan->prep_lock);
 	ioat_chan->ring = ring;
 	ioat_chan->head = 0;
 	ioat_chan->issued = 0;
 	ioat_chan->tail = 0;
 	ioat_chan->alloc_order = order;
 	set_bit(IOAT_RUN, &ioat_chan->state);
-	spin_unlock_bh(&ioat_chan->prep_lock);
-	spin_unlock_bh(&ioat_chan->cleanup_lock);
+	spin_unlock(&ioat_chan->prep_lock);
+	spin_unlock(&ioat_chan->cleanup_lock);
 
 	/* Setting up LTR values for 3.4 or later */
 	if (ioat_chan->ioat_dma->version >= IOAT_VER_3_4) {
-		u32 lat_val;
+		uint32_t lat_val;
 
 		lat_val = IOAT_CHAN_LTR_ACTIVE_SNVAL |
 			IOAT_CHAN_LTR_ACTIVE_SNLATSCALE |
 			IOAT_CHAN_LTR_ACTIVE_SNREQMNT;
-		writel(lat_val, ioat_chan->reg_base +
+		write32(lat_val, ioat_chan->reg_base +
 				IOAT_CHAN_LTR_ACTIVE_OFFSET);
 
 		lat_val = IOAT_CHAN_LTR_IDLE_SNVAL |
 			  IOAT_CHAN_LTR_IDLE_SNLATSCALE |
 			  IOAT_CHAN_LTR_IDLE_SNREQMNT;
-		writel(lat_val, ioat_chan->reg_base +
+		write32(lat_val, ioat_chan->reg_base +
 				IOAT_CHAN_LTR_IDLE_OFFSET);
 
 		/* Select to active */
-		writeb(IOAT_CHAN_LTR_SWSEL_ACTIVE,
+		write8(IOAT_CHAN_LTR_SWSEL_ACTIVE,
 		       ioat_chan->reg_base + IOAT_CHAN_LTR_SWSEL_OFFSET);
 	}
 
@@ -765,7 +766,7 @@
 	if (is_ioat_active(status) || is_ioat_idle(status))
 		return 1 << ioat_chan->alloc_order;
 
-	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+	chanerr = read32(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
 
 	dev_WARN(to_dev(ioat_chan),
 		 "failed to start channel chanerr: %#x\n", chanerr);
@@ -784,7 +785,7 @@
 	ioat_chan->ioat_dma = ioat_dma;
 	ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1));
-	spin_lock_init(&ioat_chan->cleanup_lock);
+	spinlock_init_irqsave(&ioat_chan->cleanup_lock);
 	ioat_chan->dma_chan.device = dma;
 	dma_cookie_init(&ioat_chan->dma_chan);
 	list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
@@ -805,15 +806,15 @@
 	struct dma_async_tx_descriptor *tx;
 	struct dma_chan *dma_chan;
 	dma_cookie_t cookie;
-	u8 cmp_byte = 0;
-	u32 cmp_word;
-	u32 xor_val_result;
+	uint8_t cmp_byte = 0;
+	uint32_t cmp_word;
+	uint32_t xor_val_result;
 	int err = 0;
 	struct completion cmp;
 	unsigned long tmo;
-	struct device *dev = &ioat_dma->pdev->dev;
+	struct device *dev = &ioat_dma->pdev->linux_dev;
 	struct dma_device *dma = &ioat_dma->dma_dev;
-	u8 op = 0;
+	uint8_t op = 0;
 
 	dev_dbg(dev, "%s\n", __func__);
@@ -821,31 +822,32 @@
 		return 0;
 
 	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
-		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
+		xor_srcs[src_idx] = kva2page(kpages_alloc(PGSIZE, MEM_WAIT));
 		if (!xor_srcs[src_idx]) {
 			while (src_idx--)
-				__free_page(xor_srcs[src_idx]);
+				kpages_free(page2kva(xor_srcs[src_idx]),
+					    PGSIZE);
 			return -ENOMEM;
 		}
 	}
 
-	dest = alloc_page(GFP_KERNEL);
+	dest = kva2page(kpages_alloc(PGSIZE, MEM_WAIT));
 	if (!dest) {
 		while (src_idx--)
-			__free_page(xor_srcs[src_idx]);
+			kpages_free(page2kva(xor_srcs[src_idx]), PGSIZE);
 		return -ENOMEM;
 	}
 
 	/* Fill in src buffers */
 	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
-		u8 *ptr = page_address(xor_srcs[src_idx]);
+		uint8_t *ptr = page_address(xor_srcs[src_idx]);
 
 		for (i = 0; i < PAGE_SIZE; i++)
 			ptr[i] = (1 << src_idx);
 	}
 
 	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
-		cmp_byte ^= (u8) (1 << src_idx);
+		cmp_byte ^= (uint8_t) (1 << src_idx);
 
 	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
 			(cmp_byte << 8) | cmp_byte;
@@ -911,8 +913,8 @@
 		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
 	dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
 
-	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
-		u32 *ptr = page_address(dest);
+	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); i++) {
+		uint32_t *ptr = page_address(dest);
 
 		if (ptr[i] != cmp_word) {
 			dev_err(dev, "Self-test xor failed compare\n");
@@ -1054,8 +1056,8 @@
 out:
 	src_idx = IOAT_NUM_SRC_TEST;
 	while (src_idx--)
-		__free_page(xor_srcs[src_idx]);
-	__free_page(dest);
+		kpages_free(page2kva(xor_srcs[src_idx]), PGSIZE);
+	kpages_free(page2kva(dest), PGSIZE);
 	return err;
 }
@@ -1077,7 +1079,7 @@
 	struct dma_device *dma;
 	struct dma_chan *c;
 	struct ioatdma_chan *ioat_chan;
-	u32 errmask;
+	uint32_t errmask;
 
 	dma = &ioat_dma->dma_dev;
@@ -1088,11 +1090,11 @@
 	if (ioat_dma->cap & IOAT_CAP_DWBES) {
 		list_for_each_entry(c, &dma->channels, device_node) {
 			ioat_chan = to_ioat_chan(c);
-			errmask = readl(ioat_chan->reg_base +
+			errmask = read32(ioat_chan->reg_base +
 					IOAT_CHANERR_MASK_OFFSET);
 			errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
 				   IOAT_CHANERR_XOR_Q_ERR;
-			writel(errmask, ioat_chan->reg_base +
+			write32(errmask, ioat_chan->reg_base +
 					IOAT_CHANERR_MASK_OFFSET);
 		}
 	}
@@ -1100,13 +1102,13 @@
 
 static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
 {
-	struct pci_dev *pdev = ioat_dma->pdev;
+	struct pci_device *pdev = ioat_dma->pdev;
 	int dca_en = system_has_dca_enabled(pdev);
 	struct dma_device *dma;
 	struct dma_chan *c;
 	struct ioatdma_chan *ioat_chan;
 	int err;
-	u16 val16;
+	uint16_t val16;
 
 	dma = &ioat_dma->dma_dev;
 	dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock;
@@ -1117,7 +1119,7 @@
 	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
 	dma->device_prep_dma_interrupt = ioat_prep_interrupt_lock;
 
-	ioat_dma->cap = readl(ioat_dma->reg_base + IOAT_DMA_CAP_OFFSET);
+	ioat_dma->cap = read32(ioat_dma->reg_base + IOAT_DMA_CAP_OFFSET);
 
 	if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
 		ioat_dma->cap &=
@@ -1174,7 +1176,7 @@
 		/* allocate SED DMA pool */
 		ioat_dma->sed_hw_pool[i] = dmam_pool_create(pool_name,
-							    &pdev->dev,
+							    &pdev->linux_dev,
 							    SED_SIZE * (i + 1),
 							    64, 0);
 		if (!ioat_dma->sed_hw_pool[i])
 			return -ENOMEM;
@@ -1191,7 +1193,7 @@
 
 	list_for_each_entry(c, &dma->channels, device_node) {
 		ioat_chan = to_ioat_chan(c);
-		writel(IOAT_DMA_DCA_ANY_CPU,
+		write32(IOAT_DMA_DCA_ANY_CPU,
 		       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
 	}
 
@@ -1216,13 +1218,13 @@
 		return err;
 
 	if (ioat_dma->cap & IOAT_CAP_DPS)
-		writeb(ioat_pending_level + 1,
+		write8(ioat_pending_level + 1,
 		       ioat_dma->reg_base + IOAT_PREFETCH_LIMIT_OFFSET);
 
 	return 0;
 }
 
-static void ioat_shutdown(struct pci_dev *pdev)
+static void ioat_shutdown(struct pci_device *pdev)
 {
 	struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);
 	struct ioatdma_chan *ioat_chan;
@@ -1236,9 +1238,9 @@
 		if (!ioat_chan)
 			continue;
 
-		spin_lock_bh(&ioat_chan->prep_lock);
+		spin_lock(&ioat_chan->prep_lock);
 		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
-		spin_unlock_bh(&ioat_chan->prep_lock);
+		spin_unlock(&ioat_chan->prep_lock);
 		/*
 		 * Synchronization rule for del_timer_sync():
 		 *  - The caller must not hold locks which would prevent
@@ -1257,7 +1259,7 @@
 static void ioat_resume(struct ioatdma_device *ioat_dma)
 {
 	struct ioatdma_chan *ioat_chan;
-	u32 chanerr;
+	uint32_t chanerr;
 	int i;
 
 	for (i = 0; i < IOAT_MAX_CHANS; i++) {
@@ -1265,12 +1267,12 @@
 		if (!ioat_chan)
 			continue;
 
-		spin_lock_bh(&ioat_chan->prep_lock);
+		spin_lock(&ioat_chan->prep_lock);
 		clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
-		spin_unlock_bh(&ioat_chan->prep_lock);
+		spin_unlock(&ioat_chan->prep_lock);
 
-		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
-		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+		chanerr = read32(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+		write32(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
 
 		/* no need to reset as shutdown already did that */
 	}
@@ -1278,10 +1280,10 @@
 
 #define DRV_NAME "ioatdma"
 
-static pci_ers_result_t ioat_pcie_error_detected(struct pci_dev *pdev,
+static pci_ers_result_t ioat_pcie_error_detected(struct pci_device *pdev,
 						 enum pci_channel_state error)
 {
-	dev_dbg(&pdev->dev, "%s: PCIe AER error %d\n", DRV_NAME, error);
+	dev_dbg(&pdev->linux_dev, "%s: PCIe AER error %d\n", DRV_NAME, error);
 
 	/* quiesce and block I/O */
 	ioat_shutdown(pdev);
@@ -1289,18 +1291,18 @@
 	return PCI_ERS_RESULT_NEED_RESET;
 }
 
-static pci_ers_result_t ioat_pcie_error_slot_reset(struct pci_dev *pdev)
+static pci_ers_result_t ioat_pcie_error_slot_reset(struct pci_device *pdev)
 {
 	pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
 
-	dev_dbg(&pdev->dev, "%s post reset handling\n", DRV_NAME);
+	dev_dbg(&pdev->linux_dev, "%s post reset handling\n", DRV_NAME);
 
 	if (pci_enable_device_mem(pdev) < 0) {
-		dev_err(&pdev->dev,
+		dev_err(&pdev->linux_dev,
 			"Failed to enable PCIe device after reset.\n");
 		result = PCI_ERS_RESULT_DISCONNECT;
 	} else {
-		pci_set_master(pdev);
+		pci_set_bus_master(pdev);
 		pci_restore_state(pdev);
 		pci_save_state(pdev);
 		pci_wake_from_d3(pdev, false);
@@ -1309,11 +1311,11 @@
 	return result;
 }
 
-static void ioat_pcie_error_resume(struct pci_dev *pdev)
+static void ioat_pcie_error_resume(struct pci_device *pdev)
 {
 	struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);
 
-	dev_dbg(&pdev->dev, "%s: AER handling resuming\n", DRV_NAME);
+	dev_dbg(&pdev->linux_dev, "%s: AER handling resuming\n", DRV_NAME);
 
 	/* initialize and bring everything back */
 	ioat_resume(ioat_dma);
@@ -1335,10 +1337,10 @@
 };
 
 static struct ioatdma_device *
-alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
+alloc_ioatdma(struct pci_device *pdev, void __iomem *iobase)
 {
-	struct device *dev = &pdev->dev;
-	struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+	struct device *dev = &pdev->linux_dev;
+	struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), MEM_WAIT);
 
 	if (!d)
 		return NULL;
@@ -1347,10 +1349,11 @@
 	return d;
 }
 
-static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int ioat_pci_probe(struct pci_device *pdev,
+			  const struct pci_device_id *id)
 {
 	void __iomem * const *iomap;
-	struct device *dev = &pdev->dev;
+	struct device *dev = &pdev->linux_dev;
 	struct ioatdma_device *device;
 	int err;
@@ -1380,10 +1383,10 @@
 	device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
 	if (!device)
 		return -ENOMEM;
-	pci_set_master(pdev);
+	pci_set_bus_master(pdev);
 	pci_set_drvdata(pdev, device);
 
-	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
+	device->version = read8(device->reg_base + IOAT_VER_OFFSET);
 	if (device->version >= IOAT_VER_3_4)
 		ioat_dca_enabled = 0;
 	if (device->version >= IOAT_VER_3_0) {
@@ -1405,16 +1408,16 @@
 	return 0;
 }
 
-static void ioat_remove(struct pci_dev *pdev)
+static void ioat_remove(struct pci_device *pdev)
 {
 	struct ioatdma_device *device = pci_get_drvdata(pdev);
 
 	if (!device)
 		return;
 
-	dev_err(&pdev->dev, "Removing dma and dca services\n");
+	dev_err(&pdev->linux_dev, "Removing dma and dca services\n");
 	if (device->dca) {
-		unregister_dca_provider(device->dca, &pdev->dev);
+		unregister_dca_provider(device->dca, &pdev->linux_dev);
 		free_dca_provider(device->dca);
 		device->dca = NULL;
 	}
diff --git a/kern/drivers/dma/ioat/prep.c b/kern/drivers/dma/ioat/prep.c
index 243421a..2322c74 100644
--- a/kern/drivers/dma/ioat/prep.c
+++ b/kern/drivers/dma/ioat/prep.c
@@ -31,17 +31,17 @@
 
 /* provide a lookup table for setting the source address in the base or
  * extended descriptor of an xor or pq descriptor
  */
-static const u8 xor_idx_to_desc = 0xe0;
-static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
-static const u8 pq_idx_to_desc = 0xf8;
-static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
+static const uint8_t xor_idx_to_desc = 0xe0;
+static const uint8_t xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
+static const uint8_t pq_idx_to_desc = 0xf8;
+static const uint8_t pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
 				       2, 2, 2, 2, 2, 2, 2 };
-static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
-static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
+static const uint8_t pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
+static const uint8_t pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
 					0, 1, 2, 3, 4, 5, 6 };
 
 static void xor_set_src(struct ioat_raw_descriptor *descs[2],
-			dma_addr_t addr, u32 offset, int idx)
+			dma_addr_t addr, uint32_t offset, int idx)
 {
 	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];
 
@@ -63,7 +63,8 @@
 }
 
 static void pq_set_src(struct ioat_raw_descriptor *descs[2],
-		       dma_addr_t addr, u32 offset, u8 coef, int idx)
+		       dma_addr_t addr, uint32_t offset, uint8_t coef,
+		       int idx)
 {
 	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
 	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
@@ -73,7 +74,8 @@
 }
 
 static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
-			dma_addr_t addr, u32 offset, u8 coef, unsigned idx)
+			dma_addr_t addr, uint32_t offset, uint8_t coef,
+			unsigned idx)
 {
 	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
 	struct ioat_pq16a_descriptor *pq16 =
@@ -92,7 +94,7 @@
 ioat3_alloc_sed(struct ioatdma_device *ioat_dma, unsigned int hw_pool)
 {
 	struct ioat_sed_ent *sed;
-	gfp_t flags = __GFP_ZERO | GFP_ATOMIC;
+	gfp_t flags = __GFP_ZERO | 0;
 
 	sed = kmem_cache_alloc(ioat_sed_cache, flags);
 	if (!sed)
@@ -132,7 +134,7 @@
 		return NULL;
 	i = 0;
 	do {
-		size_t copy = min_t(size_t, len, 1 << ioat_chan->xfercap_log);
+		size_t copy = MIN_T(size_t, len, 1 << ioat_chan->xfercap_log);
 
 		desc = ioat_get_ring_ent(ioat_chan, idx + i);
 		hw = desc->hw;
@@ -174,10 +176,10 @@
 	struct ioat_xor_ext_descriptor *xor_ex = NULL;
 	struct ioat_dma_descriptor *hw;
 	int num_descs, with_ext, idx, i;
-	u32 offset = 0;
-	u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;
+	uint32_t offset = 0;
+	uint8_t op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;
 
-	BUG_ON(src_cnt < 2);
+	assert(!(src_cnt < 2));
 
 	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
 	/* we need 2x the number of descriptors to cover greater than 5
@@ -202,8 +204,8 @@
 	i = 0;
 	do {
 		struct ioat_raw_descriptor *descs[2];
-		size_t xfer_size = min_t(size_t,
-					 len, 1 << ioat_chan->xfercap_log);
+		size_t xfer_size = MIN_T(size_t, len,
+					 1 << ioat_chan->xfercap_log);
 		int s;
 
 		desc = ioat_get_ring_ent(ioat_chan, idx + i);
@@ -362,8 +364,8 @@
 	struct ioat_pq_descriptor *pq;
 	struct ioat_pq_ext_descriptor *pq_ex = NULL;
 	struct ioat_dma_descriptor *hw;
-	u32 offset = 0;
-	u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
+	uint32_t offset = 0;
+	uint8_t op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
 	int i, s, idx, with_ext, num_descs;
 	int cb32 = (ioat_dma->version < IOAT_VER_3_3) ? 1 : 0;
@@ -371,7 +373,7 @@
 	/* the engine requires at least two sources (we provide
 	 * at least 1 implied source in the DMA_PREP_CONTINUE case)
 	 */
-	BUG_ON(src_cnt + dmaf_continue(flags) < 2);
+	assert(!(src_cnt + dmaf_continue(flags) < 2));
 
 	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
 	/* we need 2x the number of descriptors to cover greater than 3
@@ -398,7 +400,7 @@
 	i = 0;
 	do {
 		struct ioat_raw_descriptor *descs[2];
-		size_t xfer_size = min_t(size_t, len,
+		size_t xfer_size = MIN_T(size_t, len,
 					 1 << ioat_chan->xfercap_log);
 
 		desc = ioat_get_ring_ent(ioat_chan, idx + i);
@@ -482,8 +484,8 @@
 	struct ioat_ring_ent *desc;
 	size_t total_len = len;
 	struct ioat_pq_descriptor *pq;
-	u32 offset = 0;
-	u8 op;
+	uint32_t offset = 0;
+	uint8_t op;
 	int i, s, idx, num_descs;
 
 	/* this function is only called with 9-16 sources */
@@ -506,7 +508,7 @@
 	do {
 		struct ioat_raw_descriptor *descs[4];
-		size_t xfer_size = min_t(size_t, len,
+		size_t xfer_size = MIN_T(size_t, len,
 					 1 << ioat_chan->xfercap_log);
 
 		desc = ioat_get_ring_ent(ioat_chan, idx + i);
@@ -605,7 +607,7 @@
 		dma_addr_t single_source[2];
 		unsigned char single_source_coef[2];
 
-		BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
+		assert(!(flags & DMA_PREP_PQ_DISABLE_Q));
 		single_source[0] = src[0];
 		single_source[1] = src[0];
 		single_source_coef[0] = scf[0];