Merge tag 'block-6.17-20250808' of git://git.kernel.dk/linux
Pull more block updates from Jens Axboe:
- MD pull request via Yu:
- mddev null-ptr-dereference fix, by Erkun
- md-cluster fail to remove the faulty disk regression fix, by
Heming
- minor cleanup, by Li Nan and Jinchao
- mdadm lifetime regression fix reported by syzkaller, by Yu Kuai
- MD pull request via Christoph
- add support for getting the FDP feature in fabrics passthru path
(Nitesh Shetty)
- add capability to connect to an administrative controller
(Kamaljit Singh)
- fix a leak on sgl setup error (Keith Busch)
- initialize discovery subsys after debugfs is initialized
(Mohamed Khalfella)
- fix various comment typos (Bjorn Helgaas)
- remove unneeded semicolons (Jiapeng Chong)
- nvmet debugfs ordering issue fix
- Fix UAF in the tag_set in zloop
- Ensure sbitmap shallow depth covers entire set
- Reduce lock roundtrips in io context lookup
- Move scheduler tags alloc/free out of elevator and freeze lock, to
fix some lockdep found issues
- Improve robustness of queue limits checking
- Fix a regression with IO priorities, if no io context exists
* tag 'block-6.17-20250808' of git://git.kernel.dk/linux: (26 commits)
lib/sbitmap: make sbitmap_get_shallow() internal
lib/sbitmap: convert shallow_depth from one word to the whole sbitmap
nvmet: exit debugfs after discovery subsystem exits
block, bfq: Reorder struct bfq_iocq_bfqq_data
md: make rdev_addable usable for rcu mode
md/raid1: remove struct pool_info and related code
md/raid1: change r1conf->r1bio_pool to a pointer type
block: ensure discard_granularity is zero when discard is not supported
zloop: fix KASAN use-after-free of tag set
block: Fix default IO priority if there is no IO context
nvme: fix various comment typos
nvme-auth: remove unneeded semicolon
nvme-pci: fix leak on sgl setup error
nvmet: initialize discovery subsys after debugfs is initialized
nvme: add capability to connect to an administrative controller
nvmet: add support for FDP in fabrics passthru path
md: rename recovery_cp to resync_offset
md/md-cluster: handle REMOVE message earlier
md: fix create on open mddev lifetime regression
block: fix potential deadlock while running nr_hw_queue update
...
This commit is contained in:
@@ -208,8 +208,28 @@ static int sbitmap_find_bit_in_word(struct sbitmap_word *map,
|
||||
return nr;
|
||||
}
|
||||
|
||||
/*
 * __map_depth_with_shallow() - compute how many bits of word @index may be
 * used when the whole bitmap is limited to @shallow_depth bits.
 *
 * If @shallow_depth covers the entire bitmap, the full word depth is
 * available. Otherwise each word gets a proportional share:
 * word_depth * shallow_depth / sb->depth, with the division remainder used
 * to round some words' share up by one (see below).
 *
 * NOTE(review): "reminder" is a typo for "remainder"; left untouched since
 * it is code, not commentary.
 */
static unsigned int __map_depth_with_shallow(const struct sbitmap *sb,
|
||||
int index,
|
||||
unsigned int shallow_depth)
|
||||
{
|
||||
u64 shallow_word_depth;
|
||||
/* reminder: remainder of the 64-bit division performed by do_div() */
unsigned int word_depth, reminder;
|
||||
|
||||
word_depth = __map_depth(sb, index);
/* No limiting needed: the shallow depth spans the whole bitmap. */
if (shallow_depth >= sb->depth)
|
||||
return word_depth;
|
||||
|
||||
/* 64-bit product to avoid overflow before the division below. */
shallow_word_depth = word_depth * shallow_depth;
|
||||
/*
 * Kernel 64/32-bit division helper: divides shallow_word_depth by
 * sb->depth in place and returns the remainder.
 */
reminder = do_div(shallow_word_depth, sb->depth);
|
||||
|
||||
/*
 * Round this word's share up by one when the remainder is large
 * enough — presumably so the per-word shares sum back to (close to)
 * shallow_depth across the bitmap. NOTE(review): intent inferred;
 * confirm against the sbitmap maintainers' commit message.
 */
if (reminder >= (index + 1) * word_depth)
|
||||
shallow_word_depth++;
|
||||
|
||||
/* Quotient fits in unsigned int since it is <= word_depth. */
return (unsigned int)shallow_word_depth;
|
||||
}
|
||||
|
||||
static int sbitmap_find_bit(struct sbitmap *sb,
|
||||
unsigned int depth,
|
||||
unsigned int shallow_depth,
|
||||
unsigned int index,
|
||||
unsigned int alloc_hint,
|
||||
bool wrap)
|
||||
@@ -218,12 +238,12 @@ static int sbitmap_find_bit(struct sbitmap *sb,
|
||||
int nr = -1;
|
||||
|
||||
for (i = 0; i < sb->map_nr; i++) {
|
||||
nr = sbitmap_find_bit_in_word(&sb->map[index],
|
||||
min_t(unsigned int,
|
||||
__map_depth(sb, index),
|
||||
depth),
|
||||
alloc_hint, wrap);
|
||||
unsigned int depth = __map_depth_with_shallow(sb, index,
|
||||
shallow_depth);
|
||||
|
||||
if (depth)
|
||||
nr = sbitmap_find_bit_in_word(&sb->map[index], depth,
|
||||
alloc_hint, wrap);
|
||||
if (nr != -1) {
|
||||
nr += index << sb->shift;
|
||||
break;
|
||||
@@ -287,7 +307,22 @@ static int __sbitmap_get_shallow(struct sbitmap *sb,
|
||||
return sbitmap_find_bit(sb, shallow_depth, index, alloc_hint, true);
|
||||
}
|
||||
|
||||
int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth)
|
||||
/**
|
||||
* sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
|
||||
* limiting the depth used from each word.
|
||||
* @sb: Bitmap to allocate from.
|
||||
* @shallow_depth: The maximum number of bits to allocate from the bitmap.
|
||||
*
|
||||
* This rather specific operation allows for having multiple users with
|
||||
* different allocation limits. E.g., there can be a high-priority class that
|
||||
* uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow()
|
||||
* with a @shallow_depth of (sb->depth >> 1). Then, the low-priority
|
||||
* class can only allocate half of the total bits in the bitmap, preventing it
|
||||
* from starving out the high-priority class.
|
||||
*
|
||||
* Return: Non-negative allocated bit number if successful, -1 otherwise.
|
||||
*/
|
||||
static int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth)
|
||||
{
|
||||
int nr;
|
||||
unsigned int hint, depth;
|
||||
@@ -302,7 +337,6 @@ int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth)
|
||||
|
||||
return nr;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);
|
||||
|
||||
bool sbitmap_any_bit_set(const struct sbitmap *sb)
|
||||
{
|
||||
@@ -406,27 +440,9 @@ EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);
|
||||
/*
 * sbq_calc_wake_batch() - derive the wakeup batch size for a sbitmap queue
 * from its (shallow-limited) usable depth.
 *
 * NOTE(review): this span is a rendered diff, not final source. It shows
 * BOTH the removed old body (per-word shallow_depth arithmetic, first
 * "return wake_batch") AND the added replacement (the trailing
 * "return clamp_t(...)"), concatenated without +/- markers — consistent
 * with the hunk header "-406,27 +440,9". As literal code the second return
 * would be unreachable. Left byte-identical to preserve the diff record.
 */
static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
|
||||
unsigned int depth)
|
||||
{
|
||||
unsigned int wake_batch;
|
||||
unsigned int shallow_depth;
|
||||
|
||||
/* --- OLD implementation (removed by this commit) begins here --- */
/*
|
||||
* Each full word of the bitmap has bits_per_word bits, and there might
|
||||
* be a partial word. There are depth / bits_per_word full words and
|
||||
* depth % bits_per_word bits left over. In bitwise arithmetic:
|
||||
*
|
||||
* bits_per_word = 1 << shift
|
||||
* depth / bits_per_word = depth >> shift
|
||||
* depth % bits_per_word = depth & ((1 << shift) - 1)
|
||||
*
|
||||
* Each word can be limited to sbq->min_shallow_depth bits.
|
||||
*/
|
||||
shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
|
||||
depth = ((depth >> sbq->sb.shift) * shallow_depth +
|
||||
min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
|
||||
/* Clamp to [1, SBQ_WAKE_BATCH] spread over SBQ_WAIT_QUEUES waitqueues. */
wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
|
||||
SBQ_WAKE_BATCH);
|
||||
|
||||
return wake_batch;
|
||||
/* --- NEW implementation (added by this commit) begins here --- */
/*
 * min_shallow_depth is now bitmap-wide, so the usable depth is simply
 * min(depth, sbq->min_shallow_depth); same clamping as before.
 */
return clamp_t(unsigned int,
|
||||
min(depth, sbq->min_shallow_depth) / SBQ_WAIT_QUEUES,
|
||||
1, SBQ_WAKE_BATCH);
|
||||
}
|
||||
|
||||
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
|
||||
|
||||
Reference in New Issue
Block a user