author:     Kent Russell <kent.russell@amd.com>  2017-08-15 23:00:06 -0400
committer:  Oded Gabbay <oded.gabbay@gmail.com>  2017-08-15 23:00:06 -0400
commit:     4eacc26b3b99c32deed150adff5e38ebce60527c (patch)
tree:       f4299a1e8208c47a5182d3e80220872a5c07bb30 /drivers
parent:     79775b627dc49df06880a32b4340674554c669b9 (diff)
download:   linux-4eacc26b3b99c32deed150adff5e38ebce60527c.tar.gz
drm/amdkfd: Change x==NULL/false references to !x
Upstream prefers the !x notation to x == NULL or x == false. Along those lines,
change the == true and != NULL references as well. Also make the existing !x
references consistent, dropping the surrounding parentheses for readability.
Signed-off-by: Kent Russell <kent.russell@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
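For reference, the convention this patch standardizes on looks like the
following. This is a hypothetical sketch: the struct, field, and function
names are invented for illustration and are not taken from the patch itself.

	/*
	 * Illustrative only. Checks are written in the !x form the patch
	 * converts to, rather than explicit comparisons against NULL/false.
	 */
	#include <linux/errno.h>
	#include <linux/types.h>

	struct example_dev {
		void *doorbell;		/* NULL until mapped */
		bool init_complete;	/* false until probe finishes */
	};

	static int example_dev_check(struct example_dev *dev)
	{
		if (!dev)			/* preferred over: dev == NULL */
			return -EINVAL;

		if (!dev->init_complete)	/* preferred over: == false */
			return -EAGAIN;

		if (dev->doorbell)		/* preferred over: != NULL */
			return 0;

		return -ENOMEM;
	}

The !x spelling reads the same whether x is a pointer, a bool, or an integer
status code, which keeps the checks uniform across the driver.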
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c                | 22
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c                 | 20
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c                 |  4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c                 | 10
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c   | 50
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c               |  2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.c                 |  6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c            |  2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c           |  6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c        |  4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c         |  2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c         | 26
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c                |  6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c  |  6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c               |  6
15 files changed, 85 insertions, 87 deletions
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 6763972b03df..44c6bfe1ac57 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -265,7 +265,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, pr_debug("Looking for gpu id 0x%x\n", args->gpu_id); dev = kfd_device_by_id(args->gpu_id); - if (dev == NULL) { + if (!dev) { pr_debug("Could not find gpu id 0x%x\n", args->gpu_id); return -EINVAL; } @@ -400,7 +400,7 @@ static int kfd_ioctl_set_memory_policy(struct file *filep, } dev = kfd_device_by_id(args->gpu_id); - if (dev == NULL) + if (!dev) return -EINVAL; mutex_lock(&p->mutex); @@ -443,7 +443,7 @@ static int kfd_ioctl_dbg_register(struct file *filep, long status = 0; dev = kfd_device_by_id(args->gpu_id); - if (dev == NULL) + if (!dev) return -EINVAL; if (dev->device_info->asic_family == CHIP_CARRIZO) { @@ -465,7 +465,7 @@ static int kfd_ioctl_dbg_register(struct file *filep, return PTR_ERR(pdd); } - if (dev->dbgmgr == NULL) { + if (!dev->dbgmgr) { /* In case of a legal call, we have no dbgmgr yet */ create_ok = kfd_dbgmgr_create(&dbgmgr_ptr, dev); if (create_ok) { @@ -494,7 +494,7 @@ static int kfd_ioctl_dbg_unregister(struct file *filep, long status; dev = kfd_device_by_id(args->gpu_id); - if (dev == NULL) + if (!dev) return -EINVAL; if (dev->device_info->asic_family == CHIP_CARRIZO) { @@ -505,7 +505,7 @@ static int kfd_ioctl_dbg_unregister(struct file *filep, mutex_lock(kfd_get_dbgmgr_mutex()); status = kfd_dbgmgr_unregister(dev->dbgmgr, p); - if (status == 0) { + if (!status) { kfd_dbgmgr_destroy(dev->dbgmgr); dev->dbgmgr = NULL; } @@ -539,7 +539,7 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep, memset((void *) &aw_info, 0, sizeof(struct dbg_address_watch_info)); dev = kfd_device_by_id(args->gpu_id); - if (dev == NULL) + if (!dev) return -EINVAL; if (dev->device_info->asic_family == CHIP_CARRIZO) { @@ -646,7 +646,7 @@ static int kfd_ioctl_dbg_wave_control(struct file *filep, sizeof(wac_info.trapId); dev = kfd_device_by_id(args->gpu_id); - if (dev == NULL) + if (!dev) return -EINVAL; if (dev->device_info->asic_family == CHIP_CARRIZO) { @@ -782,9 +782,9 @@ static int kfd_ioctl_get_process_apertures(struct file *filp, "scratch_limit %llX\n", pdd->scratch_limit); args->num_of_nodes++; - } while ((pdd = kfd_get_next_process_device_data(p, pdd)) != - NULL && - (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS)); + + pdd = kfd_get_next_process_device_data(p, pdd); + } while (pdd && (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS)); } mutex_unlock(&p->mutex); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c index bf8ee196dd13..0ef913648bfa 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c @@ -77,7 +77,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev, status = kq->ops.acquire_packet_buffer(kq, pq_packets_size_in_bytes / sizeof(uint32_t), &ib_packet_buff); - if (status != 0) { + if (status) { pr_err("acquire_packet_buffer failed\n"); return status; } @@ -115,7 +115,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev, status = kfd_gtt_sa_allocate(dbgdev->dev, sizeof(uint64_t), &mem_obj); - if (status != 0) { + if (status) { pr_err("Failed to allocate GART memory\n"); kq->ops.rollback_packet(kq); return status; @@ -202,7 +202,7 @@ static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev) kq = pqm_get_kernel_queue(dbgdev->pqm, qid); - if (kq == 
NULL) { + if (!kq) { pr_err("Error getting DIQ\n"); pqm_destroy_queue(dbgdev->pqm, qid); return -EFAULT; @@ -252,7 +252,7 @@ static void dbgdev_address_watch_set_registers( addrLo->u32All = 0; cntl->u32All = 0; - if (adw_info->watch_mask != NULL) + if (adw_info->watch_mask) cntl->bitfields.mask = (uint32_t) (adw_info->watch_mask[index] & ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK); @@ -307,8 +307,7 @@ static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev, return -EINVAL; } - if ((adw_info->watch_mode == NULL) || - (adw_info->watch_address == NULL)) { + if (!adw_info->watch_mode || !adw_info->watch_address) { pr_err("adw_info fields are not valid\n"); return -EINVAL; } @@ -375,15 +374,14 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev, return -EINVAL; } - if ((NULL == adw_info->watch_mode) || - (NULL == adw_info->watch_address)) { + if (!adw_info->watch_mode || !adw_info->watch_address) { pr_err("adw_info fields are not valid\n"); return -EINVAL; } status = kfd_gtt_sa_allocate(dbgdev->dev, ib_size, &mem_obj); - if (status != 0) { + if (status) { pr_err("Failed to allocate GART memory\n"); return status; } @@ -490,7 +488,7 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev, packet_buff_uint, ib_size); - if (status != 0) { + if (status) { pr_err("Failed to submit IB to DIQ\n"); break; } @@ -711,7 +709,7 @@ static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev, packet_buff_uint, ib_size); - if (status != 0) + if (status) pr_err("Failed to submit IB to DIQ\n"); kfd_gtt_sa_free(dbgdev->dev, mem_obj); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c index 7225789f9c21..210bdc119866 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c @@ -55,7 +55,7 @@ static void kfd_dbgmgr_uninitialize(struct kfd_dbgmgr *pmgr) void kfd_dbgmgr_destroy(struct kfd_dbgmgr *pmgr) { - if (pmgr != NULL) { + if (pmgr) { kfd_dbgmgr_uninitialize(pmgr); kfree(pmgr); } @@ -66,7 +66,7 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev) enum DBGDEV_TYPE type = DBGDEV_TYPE_DIQ; struct kfd_dbgmgr *new_buff; - BUG_ON(pdev == NULL); + BUG_ON(!pdev); BUG_ON(!pdev->init_complete); new_buff = kfd_alloc_struct(new_buff); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 87df8bfb9fec..d962342a2e3e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -98,7 +98,7 @@ static const struct kfd_device_info *lookup_device_info(unsigned short did) for (i = 0; i < ARRAY_SIZE(supported_devices); i++) { if (supported_devices[i].did == did) { - BUG_ON(supported_devices[i].device_info == NULL); + BUG_ON(!supported_devices[i].device_info); return supported_devices[i].device_info; } } @@ -212,7 +212,7 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid, flags); dev = kfd_device_by_pci_dev(pdev); - BUG_ON(dev == NULL); + BUG_ON(!dev); kfd_signal_iommu_event(dev, pasid, address, flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC); @@ -262,7 +262,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd_doorbell_init(kfd); - if (kfd_topology_add_device(kfd) != 0) { + if (kfd_topology_add_device(kfd)) { dev_err(kfd_device, "Error adding device to topology\n"); goto kfd_topology_add_device_error; } @@ -288,7 +288,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, goto device_queue_manager_error; } - if (kfd->dqm->ops.start(kfd->dqm) != 0) { + if (kfd->dqm->ops.start(kfd->dqm)) { 
dev_err(kfd_device, "Error starting queue manager for device %x:%x\n", kfd->pdev->vendor, kfd->pdev->device); @@ -341,7 +341,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd) void kgd2kfd_suspend(struct kfd_dev *kfd) { - BUG_ON(kfd == NULL); + BUG_ON(!kfd); if (kfd->init_complete) { kfd->dqm->ops.stop(kfd->dqm); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 8b147e424bf3..df935312c58a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -167,7 +167,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm, if (list_empty(&qpd->queues_list)) { retval = allocate_vmid(dqm, qpd, q); - if (retval != 0) { + if (retval) { mutex_unlock(&dqm->lock); return retval; } @@ -180,7 +180,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm, if (q->properties.type == KFD_QUEUE_TYPE_SDMA) retval = create_sdma_queue_nocpsch(dqm, q, qpd); - if (retval != 0) { + if (retval) { if (list_empty(&qpd->queues_list)) { deallocate_vmid(dqm, qpd, q); *allocated_vmid = 0; @@ -262,16 +262,16 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm, BUG_ON(!dqm || !q || !qpd); mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE); - if (mqd == NULL) + if (!mqd) return -ENOMEM; retval = allocate_hqd(dqm, q); - if (retval != 0) + if (retval) return retval; retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, &q->gart_mqd_addr, &q->properties); - if (retval != 0) { + if (retval) { deallocate_hqd(dqm, q); return retval; } @@ -281,7 +281,7 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm, retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue, (uint32_t __user *) q->properties.write_ptr); - if (retval != 0) { + if (retval) { deallocate_hqd(dqm, q); mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); return retval; @@ -330,7 +330,7 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm, QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS, q->pipe, q->queue); - if (retval != 0) + if (retval) goto out; mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); @@ -365,7 +365,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q) mutex_lock(&dqm->lock); mqd = dqm->ops.get_mqd_manager(dqm, get_mqd_type_from_queue_type(q->properties.type)); - if (mqd == NULL) { + if (!mqd) { mutex_unlock(&dqm->lock); return -ENOMEM; } @@ -381,7 +381,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q) retval = mqd->update_mqd(mqd, q->mqd, &q->properties); if ((q->properties.is_active) && (!prev_active)) dqm->queue_count++; - else if ((!q->properties.is_active) && (prev_active)) + else if (!q->properties.is_active && prev_active) dqm->queue_count--; if (sched_policy != KFD_SCHED_POLICY_NO_HWS) @@ -403,7 +403,7 @@ static struct mqd_manager *get_mqd_manager_nocpsch( mqd = dqm->mqds[type]; if (!mqd) { mqd = mqd_manager_init(type, dqm->dev); - if (mqd == NULL) + if (!mqd) pr_err("mqd manager is NULL"); dqm->mqds[type] = mqd; } @@ -485,7 +485,7 @@ static void init_interrupts(struct device_queue_manager *dqm) { unsigned int i; - BUG_ON(dqm == NULL); + BUG_ON(!dqm); for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++) if (is_pipe_enabled(dqm, 0, i)) @@ -589,7 +589,7 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm, return -ENOMEM; retval = allocate_sdma_queue(dqm, &q->sdma_id); - if (retval != 0) + if (retval) return retval; q->properties.sdma_queue_id = q->sdma_id % 
CIK_SDMA_QUEUES_PER_ENGINE; @@ -602,14 +602,14 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm, dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd); retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, &q->gart_mqd_addr, &q->properties); - if (retval != 0) { + if (retval) { deallocate_sdma_queue(dqm, q->sdma_id); return retval; } retval = mqd->load_mqd(mqd, q->mqd, 0, 0, NULL); - if (retval != 0) { + if (retval) { deallocate_sdma_queue(dqm, q->sdma_id); mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); return retval; @@ -680,7 +680,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm) dqm->sdma_queue_count = 0; dqm->active_runlist = false; retval = dqm->ops_asic_specific.initialize(dqm); - if (retval != 0) + if (retval) goto fail_init_pipelines; return 0; @@ -700,11 +700,11 @@ static int start_cpsch(struct device_queue_manager *dqm) retval = 0; retval = pm_init(&dqm->packets, dqm); - if (retval != 0) + if (retval) goto fail_packet_manager_init; retval = set_sched_resources(dqm); - if (retval != 0) + if (retval) goto fail_set_sched_resources; pr_debug("Allocating fence memory\n"); @@ -713,7 +713,7 @@ static int start_cpsch(struct device_queue_manager *dqm) retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr), &dqm->fence_mem); - if (retval != 0) + if (retval) goto fail_allocate_vidmem; dqm->fence_addr = dqm->fence_mem->cpu_ptr; @@ -845,7 +845,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, mqd = dqm->ops.get_mqd_manager(dqm, get_mqd_type_from_queue_type(q->properties.type)); - if (mqd == NULL) { + if (!mqd) { mutex_unlock(&dqm->lock); return -ENOMEM; } @@ -853,7 +853,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd); retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, &q->gart_mqd_addr, &q->properties); - if (retval != 0) + if (retval) goto out; list_add(&q->list, &qpd->queues_list); @@ -934,7 +934,7 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm, retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE, preempt_type, 0, false, 0); - if (retval != 0) + if (retval) goto out; *dqm->fence_addr = KFD_FENCE_INIT; @@ -943,7 +943,7 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm, /* should be timed out */ retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED, QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS); - if (retval != 0) { + if (retval) { pdd = kfd_get_process_device_data(dqm->dev, kfd_get_process(current)); pdd->reset_wavefronts = true; @@ -968,7 +968,7 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock) mutex_lock(&dqm->lock); retval = destroy_queues_cpsch(dqm, false, false); - if (retval != 0) { + if (retval) { pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption"); goto out; } @@ -984,7 +984,7 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock) } retval = pm_send_runlist(&dqm->packets, &dqm->queues); - if (retval != 0) { + if (retval) { pr_err("failed to execute runlist"); goto out; } @@ -1193,7 +1193,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) break; } - if (dqm->ops.initialize(dqm) != 0) { + if (dqm->ops.initialize(dqm)) { kfree(dqm); return NULL; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c index ca215386ca9e..48018a313e35 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c +++ 
b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c @@ -131,7 +131,7 @@ int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma) /* Find kfd device according to gpu id */ dev = kfd_device_by_id(vma->vm_pgoff); - if (dev == NULL) + if (!dev) return -EINVAL; /* Calculate physical address of doorbell */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index abdaf95a2198..5979158c3f7b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -247,7 +247,7 @@ static u32 make_nonsignal_event_id(struct kfd_process *p) for (id = p->next_nonsignal_event_id; id < KFD_LAST_NONSIGNAL_EVENT_ID && - lookup_event_by_id(p, id) != NULL; + lookup_event_by_id(p, id); id++) ; @@ -266,7 +266,7 @@ static u32 make_nonsignal_event_id(struct kfd_process *p) for (id = KFD_FIRST_NONSIGNAL_EVENT_ID; id < KFD_LAST_NONSIGNAL_EVENT_ID && - lookup_event_by_id(p, id) != NULL; + lookup_event_by_id(p, id); id++) ; @@ -342,7 +342,7 @@ void kfd_event_init_process(struct kfd_process *p) static void destroy_event(struct kfd_process *p, struct kfd_event *ev) { - if (ev->signal_page != NULL) { + if (ev->signal_page) { release_event_notification_slot(ev->signal_page, ev->signal_slot_index); p->signal_event_count--; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index 2b655103ba79..c59384bbbc5f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -304,7 +304,7 @@ int kfd_init_apertures(struct kfd_process *process) id < NUM_OF_SUPPORTED_GPUS) { pdd = kfd_create_process_device_data(dev, process); - if (pdd == NULL) { + if (!pdd) { pr_err("Failed to create process device data\n"); return -1; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index f89d366241fc..884479886867 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -67,12 +67,12 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, break; } - if (kq->mqd == NULL) + if (!kq->mqd) return false; prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off); - if (prop.doorbell_ptr == NULL) { + if (!prop.doorbell_ptr) { pr_err("Failed to initialize doorbell"); goto err_get_kernel_doorbell; } @@ -87,7 +87,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, kq->pq_gpu_addr = kq->pq->gpu_addr; retval = kq->ops_asic_specific.initialize(kq, dev, type, queue_size); - if (retval == false) + if (!retval) goto err_eop_allocate_vidmem; retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->rptr_kernel), diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index 27fd93066058..9908227437d2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -99,7 +99,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd, m->cp_hqd_iq_rptr = AQL_ENABLE; *mqd = m; - if (gart_addr != NULL) + if (gart_addr) *gart_addr = addr; retval = mm->update_mqd(mm, m, q); @@ -127,7 +127,7 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd, memset(m, 0, sizeof(struct cik_sdma_rlc_registers)); *mqd = m; - if (gart_addr != NULL) + if (gart_addr) *gart_addr = (*mqd_mem_obj)->gpu_addr; retval = mm->update_mqd(mm, m, q); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index 5dc30f56ab44..5ba3b40f787a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -85,7 +85,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd, m->cp_hqd_iq_rptr = 1; *mqd = m; - if (gart_addr != NULL) + if (gart_addr) *gart_addr = addr; retval = mm->update_mqd(mm, m, q); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index 31d7d46463b5..f3b8cc8ea265 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -98,14 +98,14 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm, BUG_ON(!pm); BUG_ON(pm->allocated); - BUG_ON(is_over_subscription == NULL); + BUG_ON(!is_over_subscription); pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription); retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size, &pm->ib_buffer_obj); - if (retval != 0) { + if (retval) { pr_err("Failed to allocate runlist IB\n"); return retval; } @@ -321,7 +321,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm, retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr, &alloc_size_bytes, &is_over_subscription); - if (retval != 0) + if (retval) return retval; *rl_size_bytes = alloc_size_bytes; @@ -340,7 +340,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm, } retval = pm_create_map_process(pm, &rl_buffer[rl_wptr], qpd); - if (retval != 0) + if (retval) return retval; proccesses_mapped++; @@ -365,7 +365,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm, &rl_buffer[rl_wptr], kq->queue, qpd->is_debug); - if (retval != 0) + if (retval) return retval; inc_wptr(&rl_wptr, @@ -392,7 +392,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm, q, qpd->is_debug); - if (retval != 0) + if (retval) return retval; inc_wptr(&rl_wptr, @@ -421,7 +421,7 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) pm->dqm = dqm; mutex_init(&pm->lock); pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ); - if (pm->priv_queue == NULL) { + if (!pm->priv_queue) { mutex_destroy(&pm->lock); return -ENOMEM; } @@ -449,7 +449,7 @@ int pm_send_set_resources(struct packet_manager *pm, pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue, sizeof(*packet) / sizeof(uint32_t), (unsigned int **)&packet); - if (packet == NULL) { + if (!packet) { mutex_unlock(&pm->lock); pr_err("Failed to allocate buffer on kernel queue\n"); return -ENOMEM; @@ -491,7 +491,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues) retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr, &rl_ib_size); - if (retval != 0) + if (retval) goto fail_create_runlist_ib; pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr); @@ -501,12 +501,12 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues) retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue, packet_size_dwords, &rl_buffer); - if (retval != 0) + if (retval) goto fail_acquire_packet_buffer; retval = pm_create_runlist(pm, rl_buffer, rl_gpu_ib_addr, rl_ib_size / sizeof(uint32_t), false); - if (retval != 0) + if (retval) goto fail_create_runlist; pm->priv_queue->ops.submit_packet(pm->priv_queue); @@ -537,7 +537,7 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address, pm->priv_queue, sizeof(struct pm4_query_status) / sizeof(uint32_t), (unsigned int **)&packet); - if (retval != 0) + if (retval) goto 
fail_acquire_packet_buffer; packet->header.u32all = build_pm4_header(IT_QUERY_STATUS, @@ -580,7 +580,7 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type, pm->priv_queue, sizeof(struct pm4_unmap_queues) / sizeof(uint32_t), &buffer); - if (retval != 0) + if (retval) goto err_acquire_packet_buffer; packet = (struct pm4_unmap_queues *)buffer; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 86032bd0ad0d..d877cdaeaada 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -81,7 +81,7 @@ struct kfd_process *kfd_create_process(const struct task_struct *thread) BUG_ON(!kfd_process_wq); - if (thread->mm == NULL) + if (!thread->mm) return ERR_PTR(-EINVAL); /* Only the pthreads threading model is supported. */ @@ -117,7 +117,7 @@ struct kfd_process *kfd_get_process(const struct task_struct *thread) { struct kfd_process *process; - if (thread->mm == NULL) + if (!thread->mm) return ERR_PTR(-EINVAL); /* Only the pthreads threading model is supported. */ @@ -407,7 +407,7 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid) struct kfd_process *p; struct kfd_process_device *pdd; - BUG_ON(dev == NULL); + BUG_ON(!dev); /* * Look for the process that matches the pasid. If there is no such diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 9482a5adfbb8..d4f8bae2dfc9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -76,7 +76,7 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p) pqm->queue_slot_bitmap = kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, BITS_PER_BYTE), GFP_KERNEL); - if (pqm->queue_slot_bitmap == NULL) + if (!pqm->queue_slot_bitmap) return -ENOMEM; pqm->process = p; @@ -223,7 +223,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, break; case KFD_QUEUE_TYPE_DIQ: kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_DIQ); - if (kq == NULL) { + if (!kq) { retval = -ENOMEM; goto err_create_queue; } @@ -279,7 +279,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) retval = 0; pqn = get_queue_by_qid(pqm, qid); - if (pqn == NULL) { + if (!pqn) { pr_err("Queue id does not match any known queue\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 72d566a9203c..113c1cecbf72 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -416,7 +416,7 @@ static struct kfd_topology_device *kfd_create_topology_device(void) struct kfd_topology_device *dev; dev = kfd_alloc_struct(dev); - if (dev == NULL) { + if (!dev) { pr_err("No memory to allocate a topology device"); return NULL; } @@ -957,7 +957,7 @@ static int kfd_topology_update_sysfs(void) int ret; pr_info("Creating topology SYSFS entries\n"); - if (sys_props.kobj_topology == NULL) { + if (!sys_props.kobj_topology) { sys_props.kobj_topology = kfd_alloc_struct(sys_props.kobj_topology); if (!sys_props.kobj_topology) @@ -1120,7 +1120,7 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) BUG_ON(!gpu); list_for_each_entry(dev, &topology_device_list, list) - if (dev->gpu == NULL && dev->node_props.simd_count > 0) { + if (!dev->gpu && (dev->node_props.simd_count > 0)) { dev->gpu = gpu; out_dev = dev; break; |