dma: spatch Linux's dmaengine files

Ran the scripts in scripts/spatch/linux/:
	funcs.cocci
	io_funcs.cocci
	memory.cocci
	scalar.cocci
	sync.cocci

Signed-off-by: Barret Rhoden <brho@cs.berkeley.edu>
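The conversions below are mechanical. As a rough sketch (not the actual script contents), the mutex-to-qlock rewrite in sync.cocci would be a pair of Coccinelle rules like:

	@@
	expression E;
	@@
	-mutex_lock(E)
	+qlock(E)

	@@
	expression E;
	@@
	-mutex_unlock(E)
	+qunlock(E)

applied with something along the lines of: spatch --sp-file sync.cocci --in-place kern/drivers/dma/*.c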
diff --git a/kern/drivers/dma/dmaengine.c b/kern/drivers/dma/dmaengine.c
index 3a11b10..e1ebca3 100644
--- a/kern/drivers/dma/dmaengine.c
+++ b/kern/drivers/dma/dmaengine.c
@@ -94,7 +94,7 @@
 	int i;
 	int err;
 
-	mutex_lock(&dma_list_mutex);
+	qlock(&dma_list_mutex);
 	chan = dev_to_dma_chan(dev);
 	if (chan) {
 		for_each_possible_cpu(i)
@@ -102,7 +102,7 @@
 		err = sprintf(buf, "%lu\n", count);
 	} else
 		err = -ENODEV;
-	mutex_unlock(&dma_list_mutex);
+	qunlock(&dma_list_mutex);
 
 	return err;
 }
@@ -116,7 +116,7 @@
 	int i;
 	int err;
 
-	mutex_lock(&dma_list_mutex);
+	qlock(&dma_list_mutex);
 	chan = dev_to_dma_chan(dev);
 	if (chan) {
 		for_each_possible_cpu(i)
@@ -124,7 +124,7 @@
 		err = sprintf(buf, "%lu\n", count);
 	} else
 		err = -ENODEV;
-	mutex_unlock(&dma_list_mutex);
+	qunlock(&dma_list_mutex);
 
 	return err;
 }
@@ -136,13 +136,13 @@
 	struct dma_chan *chan;
 	int err;
 
-	mutex_lock(&dma_list_mutex);
+	qlock(&dma_list_mutex);
 	chan = dev_to_dma_chan(dev);
 	if (chan)
 		err = sprintf(buf, "%d\n", chan->client_count);
 	else
 		err = -ENODEV;
-	mutex_unlock(&dma_list_mutex);
+	qunlock(&dma_list_mutex);
 
 	return err;
 }
@@ -161,7 +161,7 @@
 	struct dma_chan_dev *chan_dev;
 
 	chan_dev = container_of(dev, typeof(*chan_dev), device);
-	if (atomic_dec_and_test(chan_dev->idr_ref)) {
+	if (atomic_sub_and_test(chan_dev->idr_ref, 1)) {
 		ida_free(&dma_ida, chan_dev->dev_id);
 		kfree(chan_dev->idr_ref);
 	}
@@ -591,7 +591,7 @@
 	int err = -EBUSY;
 
 	/* lock against __dma_request_channel */
-	mutex_lock(&dma_list_mutex);
+	qlock(&dma_list_mutex);
 	if (chan->client_count == 0) {
 		struct dma_device *device = chan->device;
 
@@ -610,7 +610,7 @@
 	} else
 		chan = NULL;
 
-	mutex_unlock(&dma_list_mutex);
+	qunlock(&dma_list_mutex);
 
 
 	return chan;
@@ -626,11 +626,11 @@
 	dma_cap_set(DMA_SLAVE, mask);
 
 	/* lock against __dma_request_channel */
-	mutex_lock(&dma_list_mutex);
+	qlock(&dma_list_mutex);
 
 	chan = find_candidate(device, &mask, NULL, NULL);
 
-	mutex_unlock(&dma_list_mutex);
+	qunlock(&dma_list_mutex);
 
 	return IS_ERR(chan) ? NULL : chan;
 }
@@ -651,7 +651,7 @@
 	struct dma_chan *chan = NULL;
 
 	/* Find a channel */
-	mutex_lock(&dma_list_mutex);
+	qlock(&dma_list_mutex);
 	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 		chan = find_candidate(device, mask, fn, fn_param);
 		if (!IS_ERR(chan))
@@ -659,7 +659,7 @@
 
 		chan = NULL;
 	}
-	mutex_unlock(&dma_list_mutex);
+	qunlock(&dma_list_mutex);
 
 	pr_debug("%s: %s (%s)\n",
 		 __func__,
@@ -717,7 +717,7 @@
 	}
 
 	/* Try to find the channel via the DMA filter map(s) */
-	mutex_lock(&dma_list_mutex);
+	qlock(&dma_list_mutex);
 	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
 		dma_cap_mask_t mask;
 		const struct dma_slave_map *map = dma_filter_match(d, name, dev);
@@ -732,7 +732,7 @@
 		if (!IS_ERR(chan))
 			break;
 	}
-	mutex_unlock(&dma_list_mutex);
+	qunlock(&dma_list_mutex);
 
 	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
 }
@@ -771,12 +771,12 @@
 
 	chan = __dma_request_channel(mask, NULL, NULL);
 	if (!chan) {
-		mutex_lock(&dma_list_mutex);
+		qlock(&dma_list_mutex);
 		if (list_empty(&dma_device_list))
 			chan = ERR_PTR(-EPROBE_DEFER);
 		else
 			chan = ERR_PTR(-ENODEV);
-		mutex_unlock(&dma_list_mutex);
+		qunlock(&dma_list_mutex);
 	}
 
 	return chan;
@@ -785,14 +785,14 @@
 
 void dma_release_channel(struct dma_chan *chan)
 {
-	mutex_lock(&dma_list_mutex);
+	qlock(&dma_list_mutex);
 	WARN_ONCE(chan->client_count != 1,
 		  "chan reference count %d != 1\n", chan->client_count);
 	dma_chan_put(chan);
 	/* drop PRIVATE cap enabled by __dma_request_channel() */
 	if (--chan->device->privatecnt == 0)
 		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
-	mutex_unlock(&dma_list_mutex);
+	qunlock(&dma_list_mutex);
 }
 EXPORT_SYMBOL_GPL(dma_release_channel);
 
@@ -805,7 +805,7 @@
 	struct dma_chan *chan;
 	int err;
 
-	mutex_lock(&dma_list_mutex);
+	qlock(&dma_list_mutex);
 	dmaengine_ref_count++;
 
 	/* try to grab channels */
@@ -831,7 +831,7 @@
 	 */
 	if (dmaengine_ref_count == 1)
 		dma_channel_rebalance();
-	mutex_unlock(&dma_list_mutex);
+	qunlock(&dma_list_mutex);
 }
 EXPORT_SYMBOL(dmaengine_get);
 
@@ -843,9 +843,9 @@
 	struct dma_device *device;
 	struct dma_chan *chan;
 
-	mutex_lock(&dma_list_mutex);
+	qlock(&dma_list_mutex);
 	dmaengine_ref_count--;
-	BUG_ON(dmaengine_ref_count < 0);
+	assert(!(dmaengine_ref_count < 0));
 	/* drop channel references */
 	list_for_each_entry(device, &dma_device_list, global_node) {
 		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
@@ -853,7 +853,7 @@
 		list_for_each_entry(chan, &device->channels, device_node)
 			dma_chan_put(chan);
 	}
-	mutex_unlock(&dma_list_mutex);
+	qunlock(&dma_list_mutex);
 }
 EXPORT_SYMBOL(dmaengine_put);
 
@@ -898,7 +898,7 @@
 
 static int get_dma_id(struct dma_device *device)
 {
-	int rc = ida_alloc(&dma_ida, GFP_KERNEL);
+	int rc = ida_alloc(&dma_ida, MEM_WAIT);
 
 	if (rc < 0)
 		return rc;
@@ -1006,7 +1006,7 @@
 	if (device_has_all_tx_types(device))
 		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
 
-	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
+	idr_ref = kmalloc(sizeof(*idr_ref), MEM_WAIT);
 	if (!idr_ref)
 		return -ENOMEM;
 	rc = get_dma_id(device);
@@ -1023,7 +1023,7 @@
 		chan->local = alloc_percpu(typeof(*chan->local));
 		if (chan->local == NULL)
 			goto err_out;
-		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
+		chan->dev = kzmalloc(sizeof(*chan->dev), MEM_WAIT);
 		if (chan->dev == NULL) {
 			free_percpu(chan->local);
 			chan->local = NULL;
@@ -1059,7 +1059,7 @@
 
 	device->chancnt = chancnt;
 
-	mutex_lock(&dma_list_mutex);
+	qlock(&dma_list_mutex);
 	/* take references on public channels */
 	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
 		list_for_each_entry(chan, &device->channels, device_node) {
@@ -1072,7 +1072,7 @@
 				 * guaranteed to get a reference
 				 */
 				rc = -ENODEV;
-				mutex_unlock(&dma_list_mutex);
+				qunlock(&dma_list_mutex);
 				goto err_out;
 			}
 		}
@@ -1080,7 +1080,7 @@
 	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 		device->privatecnt++;	/* Always private */
 	dma_channel_rebalance();
-	mutex_unlock(&dma_list_mutex);
+	qunlock(&dma_list_mutex);
 
 	return 0;
 
@@ -1095,9 +1095,9 @@
 	list_for_each_entry(chan, &device->channels, device_node) {
 		if (chan->local == NULL)
 			continue;
-		mutex_lock(&dma_list_mutex);
+		qlock(&dma_list_mutex);
 		chan->dev->chan = NULL;
-		mutex_unlock(&dma_list_mutex);
+		qunlock(&dma_list_mutex);
 		device_unregister(&chan->dev->device);
 		free_percpu(chan->local);
 	}
@@ -1116,18 +1116,18 @@
 {
 	struct dma_chan *chan;
 
-	mutex_lock(&dma_list_mutex);
+	qlock(&dma_list_mutex);
 	list_del_rcu(&device->global_node);
 	dma_channel_rebalance();
-	mutex_unlock(&dma_list_mutex);
+	qunlock(&dma_list_mutex);
 
 	list_for_each_entry(chan, &device->channels, device_node) {
 		WARN_ONCE(chan->client_count,
 			  "%s called while %d clients hold a reference\n",
 			  __func__, chan->client_count);
-		mutex_lock(&dma_list_mutex);
+		qlock(&dma_list_mutex);
 		chan->dev->chan = NULL;
-		mutex_unlock(&dma_list_mutex);
+		qunlock(&dma_list_mutex);
 		device_unregister(&chan->dev->device);
 		free_percpu(chan->local);
 	}
@@ -1153,7 +1153,7 @@
 	void *p;
 	int ret;
 
-	p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
+	p = devres_alloc(dmam_device_release, sizeof(void *), MEM_WAIT);
 	if (!p)
 		return -ENOMEM;
 
@@ -1202,7 +1202,7 @@
 		return &unmap_pool[3];
 #endif
 	default:
-		BUG();
+		panic("BUG");
 		return NULL;
 	}
 }
@@ -1303,7 +1303,7 @@
 {
 	tx->chan = chan;
 	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
-	spin_lock_init(&tx->lock);
+	spinlock_init_irqsave(&tx->lock);
 	#endif
 }
 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
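The allocation changes in this file (GFP_KERNEL to MEM_WAIT, kzalloc to kzmalloc) suggest memory.cocci rules of roughly this shape; again a sketch, assuming the script rewrites call sites directly:

	@@
	expression SZ;
	@@
	-kmalloc(SZ, GFP_KERNEL)
	+kmalloc(SZ, MEM_WAIT)

	@@
	expression SZ;
	@@
	-kzalloc(SZ, GFP_KERNEL)
	+kzmalloc(SZ, MEM_WAIT)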
diff --git a/kern/drivers/dma/dmaengine.h b/kern/drivers/dma/dmaengine.h
index 501c0b0..fcd6cd5 100644
--- a/kern/drivers/dma/dmaengine.h
+++ b/kern/drivers/dma/dmaengine.h
@@ -51,7 +51,7 @@
  */
 static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
 {
-	BUG_ON(tx->cookie < DMA_MIN_COOKIE);
+	assert(!(tx->cookie < DMA_MIN_COOKIE));
 	tx->chan->completed_cookie = tx->cookie;
 	tx->cookie = 0;
 }
@@ -72,7 +72,7 @@
 
 	used = chan->cookie;
 	complete = chan->completed_cookie;
-	barrier();
+	cmb();
 	if (state) {
 		state->last = complete;
 		state->used = used;
@@ -81,7 +81,8 @@
 	return dma_async_is_complete(cookie, complete, used);
 }
 
-static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
+static inline void dma_set_residue(struct dma_tx_state *state,
+				   uint32_t residue)
 {
 	if (state)
 		state->residue = residue;
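The assertion and barrier conversions (BUG_ON to assert, barrier to cmb) fit the same pattern; hypothetical rules, since the diff does not show which of the listed scripts carries them:

	@@
	expression E;
	@@
	-BUG_ON(E);
	+assert(!(E));

	@@
	@@
	-barrier();
	+cmb();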
diff --git a/kern/include/linux/dmaengine.h b/kern/include/linux/dmaengine.h
index d49ec5c..d5c07e6 100644
--- a/kern/include/linux/dmaengine.h
+++ b/kern/include/linux/dmaengine.h
@@ -31,7 +31,7 @@
  *
  * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
  */
-typedef s32 dma_cookie_t;
+typedef int32_t dma_cookie_t;
 #define DMA_MIN_COOKIE	1
 
 static inline int dma_submit_error(dma_cookie_t cookie)
@@ -370,10 +370,10 @@
 	phys_addr_t dst_addr;
 	enum dma_slave_buswidth src_addr_width;
 	enum dma_slave_buswidth dst_addr_width;
-	u32 src_maxburst;
-	u32 dst_maxburst;
-	u32 src_port_window_size;
-	u32 dst_port_window_size;
+	uint32_t src_maxburst;
+	uint32_t dst_maxburst;
+	uint32_t src_port_window_size;
+	uint32_t dst_port_window_size;
 	bool device_fc;
 	unsigned int slave_id;
 };
@@ -424,10 +424,10 @@
  * resubmitted multiple times
  */
 struct dma_slave_caps {
-	u32 src_addr_widths;
-	u32 dst_addr_widths;
-	u32 directions;
-	u32 max_burst;
+	uint32_t src_addr_widths;
+	uint32_t dst_addr_widths;
+	uint32_t directions;
+	uint32_t max_burst;
 	bool cmd_pause;
 	bool cmd_resume;
 	bool cmd_terminate;
@@ -466,7 +466,7 @@
 
 struct dmaengine_result {
 	enum dmaengine_tx_result result;
-	u32 residue;
+	uint32_t residue;
 };
 
 typedef void (*dma_async_tx_callback_result)(void *dma_async_param,
@@ -474,13 +474,13 @@
 
 struct dmaengine_unmap_data {
 #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
-	u16 map_cnt;
+	uint16_t map_cnt;
 #else
-	u8 map_cnt;
+	uint8_t map_cnt;
 #endif
-	u8 to_cnt;
-	u8 from_cnt;
-	u8 bidi_cnt;
+	uint8_t to_cnt;
+	uint8_t from_cnt;
+	uint8_t bidi_cnt;
 	struct device *dev;
 	struct kref kref;
 	size_t len;
@@ -566,7 +566,7 @@
 }
 static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
 {
-	BUG();
+	panic("BUG");
 }
 static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
 {
@@ -586,11 +586,11 @@
 #else
 static inline void txd_lock(struct dma_async_tx_descriptor *txd)
 {
-	spin_lock_bh(&txd->lock);
+	spin_lock(&txd->lock);
 }
 static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
 {
-	spin_unlock_bh(&txd->lock);
+	spin_unlock(&txd->lock);
 }
 static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
 {
@@ -627,7 +627,7 @@
 struct dma_tx_state {
 	dma_cookie_t last;
 	dma_cookie_t used;
-	u32 residue;
+	uint32_t residue;
 };
 
 /**
@@ -750,10 +750,10 @@
 	int dev_id;
 	struct device *dev;
 
-	u32 src_addr_widths;
-	u32 dst_addr_widths;
-	u32 directions;
-	u32 max_burst;
+	uint32_t src_addr_widths;
+	uint32_t dst_addr_widths;
+	uint32_t directions;
+	uint32_t max_burst;
 	bool descriptor_reuse;
 	enum dma_residue_granularity residue_granularity;
 
@@ -798,7 +798,7 @@
 		struct dma_chan *chan, struct dma_interleaved_template *xt,
 		unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
-		struct dma_chan *chan, dma_addr_t dst, u64 data,
+		struct dma_chan *chan, dma_addr_t dst, uint64_t data,
 		unsigned long flags);
 
 	int (*device_config)(struct dma_chan *chan,
@@ -1126,7 +1126,7 @@
 		return dma_dev_to_maxpq(dma) - 1;
 	else if (dmaf_continue(flags))
 		return dma_dev_to_maxpq(dma) - 3;
-	BUG();
+	panic("BUG");
 }
 
 static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
@@ -1299,7 +1299,8 @@
 }
 
 static inline void
-dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
+dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used,
+		 uint32_t residue)
 {
 	if (st) {
 		st->last = last;
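The scalar renames (u8/u16/u32/u64 to uint*_t, s32 to int32_t) are what typedef-based scalar.cocci rules produce; one rule per type, sketched here under the assumption that both names are declared as typedefs for Coccinelle's parser:

	@@
	typedef u32;
	typedef uint32_t;
	@@
	-u32
	+uint32_t

	@@
	typedef s32;
	typedef int32_t;
	@@
	-s32
	+int32_t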