memremap: remove support for external pgmap refcounts

No driver is left using the external pgmap refcount, so remove the
code to support it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Bjorn Helgaas <bhelgaas@google.com>
Link: https://lore.kernel.org/r/20211028151017.50234-1-hch@lst.de
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
commit b80892ca02 (parent b842f1d14a)
Christoph Hellwig, 2021-10-28 17:10:17 +02:00, committed by Dan Williams
4 changed files with 26 additions and 92 deletions
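
For context, a minimal driver-side sketch (not part of this patch; the function and variable names and the MEMORY_DEVICE_GENERIC usage are illustrative assumptions): after this change a ZONE_DEVICE user just fills in the range and type and calls memremap_pages(), since the percpu_ref is now always embedded in struct dev_pagemap and no ops->kill/ops->cleanup callbacks or caller-supplied refcount are needed.

	/*
	 * Hypothetical example, not from this commit: map a device memory
	 * range using the refcount embedded in struct dev_pagemap.
	 */
	#include <linux/memremap.h>

	static struct dev_pagemap example_pgmap;	/* illustrative name */

	static void *example_map(phys_addr_t base, size_t size, int nid)
	{
		example_pgmap.type = MEMORY_DEVICE_GENERIC;
		example_pgmap.range.start = base;
		example_pgmap.range.end = base + size - 1;
		example_pgmap.nr_range = 1;
		/* no pgmap->ref assignment, no ops->kill/->cleanup required */

		return memremap_pages(&example_pgmap, nid);	/* ERR_PTR() on failure */
	}

	static void example_unmap(void)
	{
		/* kills the embedded ref, waits for pgmap->done, tears everything down */
		memunmap_pages(&example_pgmap);
	}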

--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -112,30 +112,6 @@ static unsigned long pfn_next(unsigned long pfn)
 #define for_each_device_pfn(pfn, map, i) \
 	for (pfn = pfn_first(map, i); pfn < pfn_end(map, i); pfn = pfn_next(pfn))
 
-static void dev_pagemap_kill(struct dev_pagemap *pgmap)
-{
-	if (pgmap->ops && pgmap->ops->kill)
-		pgmap->ops->kill(pgmap);
-	else
-		percpu_ref_kill(pgmap->ref);
-}
-
-static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
-{
-	if (pgmap->ops && pgmap->ops->cleanup) {
-		pgmap->ops->cleanup(pgmap);
-	} else {
-		wait_for_completion(&pgmap->done);
-		percpu_ref_exit(pgmap->ref);
-	}
-	/*
-	 * Undo the pgmap ref assignment for the internal case as the
-	 * caller may re-enable the same pgmap.
-	 */
-	if (pgmap->ref == &pgmap->internal_ref)
-		pgmap->ref = NULL;
-}
-
 static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
 {
 	struct range *range = &pgmap->ranges[range_id];
@@ -167,11 +143,12 @@ void memunmap_pages(struct dev_pagemap *pgmap)
 	unsigned long pfn;
 	int i;
 
-	dev_pagemap_kill(pgmap);
+	percpu_ref_kill(&pgmap->ref);
 	for (i = 0; i < pgmap->nr_range; i++)
 		for_each_device_pfn(pfn, pgmap, i)
 			put_page(pfn_to_page(pfn));
-	dev_pagemap_cleanup(pgmap);
+	wait_for_completion(&pgmap->done);
+	percpu_ref_exit(&pgmap->ref);
 
 	for (i = 0; i < pgmap->nr_range; i++)
 		pageunmap_range(pgmap, i);
@@ -188,8 +165,7 @@ static void devm_memremap_pages_release(void *data)
 
 static void dev_pagemap_percpu_release(struct percpu_ref *ref)
 {
-	struct dev_pagemap *pgmap =
-		container_of(ref, struct dev_pagemap, internal_ref);
+	struct dev_pagemap *pgmap = container_of(ref, struct dev_pagemap, ref);
 
 	complete(&pgmap->done);
 }
@@ -295,8 +271,8 @@ static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
 	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
 				PHYS_PFN(range->start),
 				PHYS_PFN(range_len(range)), pgmap);
-	percpu_ref_get_many(pgmap->ref, pfn_end(pgmap, range_id)
-			- pfn_first(pgmap, range_id));
+	percpu_ref_get_many(&pgmap->ref,
+			pfn_end(pgmap, range_id) - pfn_first(pgmap, range_id));
 	return 0;
 
 err_add_memory:
@@ -362,22 +338,11 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
 		break;
 	}
 
-	if (!pgmap->ref) {
-		if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
-			return ERR_PTR(-EINVAL);
-
-		init_completion(&pgmap->done);
-		error = percpu_ref_init(&pgmap->internal_ref,
-				dev_pagemap_percpu_release, 0, GFP_KERNEL);
-		if (error)
-			return ERR_PTR(error);
-		pgmap->ref = &pgmap->internal_ref;
-	} else {
-		if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
-			WARN(1, "Missing reference count teardown definition\n");
-			return ERR_PTR(-EINVAL);
-		}
-	}
+	init_completion(&pgmap->done);
+	error = percpu_ref_init(&pgmap->ref, dev_pagemap_percpu_release, 0,
+			GFP_KERNEL);
+	if (error)
+		return ERR_PTR(error);
 
 	devmap_managed_enable_get(pgmap);
 
@@ -486,7 +451,7 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
 	/* fall back to slow path lookup */
 	rcu_read_lock();
 	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
-	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
+	if (pgmap && !percpu_ref_tryget_live(&pgmap->ref))
 		pgmap = NULL;
 	rcu_read_unlock();
 