From: Keith Busch

A bio segment may hold partial interval block data, with the rest
continuing into the following segments, because direct-io data payloads
only need to be aligned in memory to the device's DMA limits. At the
same time, the protection information may also be split across multiple
segments. The most likely ways that happens are when two requests
merge, or when we're directly using io_uring user metadata. The
generate/verify helpers, however, only ever accessed the first bip_vec.
Further, the protection fields may be unaligned within the user space
buffer, or there may be odd additional opaque bytes in front of or
behind the protection information in the metadata region. Change the
iteration to allow spanning multiple segments.

This patch is mostly a rewrite of the protection information handling
to allow arbitrary alignments, so it's probably easier to review the
end result rather than the diff.

Martin reports that many SCSI controllers are not able to handle
interval data composed of multiple segments when PI is used, so this
patch introduces a new integrity limit that a low level driver can set
to advertise that it is capable of handling that, defaulting to false.
The nvme driver is the first one to enable it in this patch. Everyone
else will force DMA alignment to the logical block size to ensure
interval data is always aligned within a single segment.

Cc: "Martin K. Petersen"
Signed-off-by: Keith Busch
---
v4->v5:

 * Fixed up the ip checksum when it's split among intervals. The
   previous version would happen to work if the data interval was
   aligned in a single segment, but it would have gotten the wrong
   final result if it had to do multiple partial updates.

   Testing this type was a little more difficult than it sounds. The
   scsi_debug driver would force alignment, so it would never hit the
   partial condition that was previously broken. To test it, I hacked
   up an nvme qemu emulation for this checksum type, taking some
   liberty with the protocol's undefined fields. qemu has its own
   checksum implementation, 'net_raw_checksum()', and it calculates
   the same result through its contiguous bounce buffer as the
   kernel's partial + fold implementation, so I think it's okay now.

 * Fixed up some naming issues that Martin requested from an even
   earlier version of this patch.
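For reviewers unfamiliar with the fold issue above: the "ip" guard is a
ones' complement sum, which can be accumulated over arbitrary fragments
as a wide partial sum so long as it is folded down to 16 bits only
once, at the end of the interval. A rough userspace sketch of the math
(illustrative only, not the kernel's csum_partial()/csum_fold(), and it
assumes fragments split on even byte offsets):

  #include <stdint.h>
  #include <stddef.h>

  /* Accumulate one fragment into a running 32-bit partial sum. */
  static uint32_t csum_accumulate(uint32_t sum, const uint8_t *buf,
                                  size_t len)
  {
          while (len > 1) {
                  sum += ((uint32_t)buf[0] << 8) | buf[1];
                  buf += 2;
                  len -= 2;
          }
          if (len)        /* odd trailing byte at the end of the interval */
                  sum += (uint32_t)buf[0] << 8;
          return sum;
  }

  /* Fold to the final 16-bit checksum, exactly once per interval. */
  static uint16_t csum_finish(uint32_t sum)
  {
          while (sum >> 16)
                  sum = (sum & 0xffff) + (sum >> 16);
          return (uint16_t)~sum;
  }

Folding after every fragment instead of once per interval is
essentially the failure mode described above: each intermediate result
gets truncated and complemented before the next fragment is added, so
the final guard comes out wrong whenever an interval spans segments.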
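Side note for other low level drivers: once audited, opting in to split
intervals is just the new limit bit next to the existing integrity
limits. A minimal sketch (my_ctrl and my_ctrl_can_split_intervals() are
made up for illustration; the real example is the nvme hunk below):

  /* Hypothetical LLD setup mirroring what nvme_init_integrity() does */
  static void my_driver_set_integrity_limits(struct queue_limits *lim,
                                             struct my_ctrl *ctrl)
  {
          struct blk_integrity *bi = &lim->integrity;

          /*
           * Only claim this if the controller can DMA a single data
           * interval that is scattered across multiple segments.
           */
          if (my_ctrl_can_split_intervals(ctrl))
                  bi->split_interval_capable = true;

          /*
           * Otherwise blk_validate_integrity_limits() raises
           * dma_alignment so each interval stays in one segment.
           */
  }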
 block/blk-settings.c     |   6 +-
 block/t10-pi.c           | 825 ++++++++++++++++++++++-----------------
 drivers/nvme/host/core.c |   1 +
 include/linux/blkdev.h   |   1 +
 4 files changed, 468 insertions(+), 365 deletions(-)

diff --git a/block/blk-settings.c b/block/blk-settings.c
index 51401f08ce05b..cfd084e824af7 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -198,11 +198,11 @@ static int blk_validate_integrity_limits(struct queue_limits *lim)
 		bi->interval_exp = ilog2(lim->logical_block_size);

 	/*
-	 * The PI generation / validation helpers do not expect intervals to
-	 * straddle multiple bio_vecs. Enforce alignment so that those are
+	 * Some IO controllers can not handle data intervals straddling
+	 * multiple bio_vecs. For those, enforce alignment so that those are
 	 * never generated, and that each buffer is aligned as expected.
 	 */
-	if (bi->csum_type) {
+	if (!bi->split_interval_capable && bi->csum_type) {
 		lim->dma_alignment = max(lim->dma_alignment,
 				(1U << bi->interval_exp) - 1);
 	}
diff --git a/block/t10-pi.c b/block/t10-pi.c
index 0c4ed97021460..da85458f7a41a 100644
--- a/block/t10-pi.c
+++ b/block/t10-pi.c
@@ -12,462 +12,563 @@
 #include <linux/unaligned.h>
 #include "blk.h"

+#define APP_TAG_ESCAPE 0xffff
+#define REF_TAG_ESCAPE 0xffffffff
+
+/*
+ * This union is used for onstack allocations when the pi field is split across
+ * segments. blk_validate_integrity_limits() guarantees pi_tuple_size matches
+ * the sizeof one of these two types.
+ */
+union pi_tuple {
+	struct crc64_pi_tuple crc64_pi;
+	struct t10_pi_tuple t10_pi;
+};
+
 struct blk_integrity_iter {
-	void *prot_buf;
-	void *data_buf;
-	sector_t seed;
-	unsigned int data_size;
-	unsigned short interval;
-	const char *disk_name;
+	struct bio *bio;
+	struct bio_integrity_payload *bip;
+	struct blk_integrity *bi;
+	struct bvec_iter data_iter;
+	struct bvec_iter prot_iter;
+	unsigned int interval_remaining;
+	u64 seed;
+	u64 csum;
 };

-static __be16 t10_pi_csum(__be16 csum, void *data, unsigned int len,
-		unsigned char csum_type)
+static void blk_calculate_guard(struct blk_integrity_iter *iter, void *data,
+				unsigned int len)
 {
-	if (csum_type == BLK_INTEGRITY_CSUM_IP)
-		return (__force __be16)ip_compute_csum(data, len);
-	return cpu_to_be16(crc_t10dif_update(be16_to_cpu(csum), data, len));
+	switch (iter->bi->csum_type) {
+	case BLK_INTEGRITY_CSUM_CRC64:
+		iter->csum = crc64_nvme(iter->csum, data, len);
+		break;
+	case BLK_INTEGRITY_CSUM_CRC:
+		iter->csum = crc_t10dif_update(iter->csum, data, len);
+		break;
+	case BLK_INTEGRITY_CSUM_IP:
+		iter->csum = (__force u32)csum_partial(data, len,
+						(__force __wsum)iter->csum);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		iter->csum = U64_MAX;
+		break;
+	}
+}
+
+static void blk_integrity_csum_finish(struct blk_integrity_iter *iter)
+{
+	switch (iter->bi->csum_type) {
+	case BLK_INTEGRITY_CSUM_IP:
+		iter->csum = (__force u16)csum_fold((__force __wsum)iter->csum);
+		break;
+	default:
+		break;
+	}
 }

 /*
- * Type 1 and Type 2 protection use the same format: 16 bit guard tag,
- * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref
- * tag.
+ * Update the csum for formats that have metadata padding in front of the data
+ * integrity field
  */
-static void t10_pi_generate(struct blk_integrity_iter *iter,
-		struct blk_integrity *bi)
+static void blk_integrity_csum_offset(struct blk_integrity_iter *iter)
+{
+	unsigned int offset = iter->bi->pi_offset;
+	struct bio_vec *bvec = iter->bip->bip_vec;
+
+	while (offset > 0) {
+		struct bio_vec pbv = mp_bvec_iter_bvec(bvec, iter->prot_iter);
+		unsigned int len = min(pbv.bv_len, offset);
+		void *prot_buf = bvec_kmap_local(&pbv);
+
+		blk_calculate_guard(iter, prot_buf, len);
+		kunmap_local(prot_buf);
+		offset -= len;
+		bvec_iter_advance_single(bvec, &iter->prot_iter, len);
+	}
+	blk_integrity_csum_finish(iter);
+}
+
+static void blk_integrity_copy_from_tuple(struct bio_integrity_payload *bip,
+					  struct bvec_iter *iter, void *tuple,
+					  unsigned int tuple_size)
 {
-	u8 offset = bi->pi_offset;
-	unsigned int i;
+	void *prot_buf;

-	for (i = 0 ; i < iter->data_size ; i += iter->interval) {
-		struct t10_pi_tuple *pi = iter->prot_buf + offset;
+	while (tuple_size) {
+		struct bio_vec pbv = mp_bvec_iter_bvec(bip->bip_vec, *iter);
+		unsigned int len = min(tuple_size, pbv.bv_len);

-		pi->guard_tag = t10_pi_csum(0, iter->data_buf, iter->interval,
-					    bi->csum_type);
-		if (offset)
-			pi->guard_tag = t10_pi_csum(pi->guard_tag,
-					iter->prot_buf, offset, bi->csum_type);
-		pi->app_tag = 0;
+		prot_buf = bvec_kmap_local(&pbv);
+		memcpy(prot_buf, tuple, len);
+		kunmap_local(prot_buf);

-		if (bi->flags & BLK_INTEGRITY_REF_TAG)
-			pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed));
-		else
-			pi->ref_tag = 0;
+		bvec_iter_advance_single(bip->bip_vec, iter, len);
+		tuple_size -= len;
+		tuple += len;
+	}
+}

-		iter->data_buf += iter->interval;
-		iter->prot_buf += bi->metadata_size;
-		iter->seed++;
+static void blk_integrity_copy_to_tuple(struct bio_integrity_payload *bip,
+					struct bvec_iter *iter, void *tuple,
+					unsigned int tuple_size)
+{
+	void *prot_buf;
+
+	while (tuple_size) {
+		struct bio_vec pbv = mp_bvec_iter_bvec(bip->bip_vec, *iter);
+		unsigned int len = min(tuple_size, pbv.bv_len);
+
+		prot_buf = bvec_kmap_local(&pbv);
+		memcpy(tuple, prot_buf, len);
+		kunmap_local(prot_buf);
+
+		bvec_iter_advance_single(bip->bip_vec, iter, len);
+		tuple_size -= len;
+		tuple += len;
 	}
 }

-static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
-		struct blk_integrity *bi)
-{
-	u8 offset = bi->pi_offset;
-	unsigned int i;
-
-	for (i = 0 ; i < iter->data_size ; i += iter->interval) {
-		struct t10_pi_tuple *pi = iter->prot_buf + offset;
-		__be16 csum;
-
-		if (bi->flags & BLK_INTEGRITY_REF_TAG) {
-			if (pi->app_tag == T10_PI_APP_ESCAPE)
-				goto next;
-
-			if (be32_to_cpu(pi->ref_tag) !=
-			    lower_32_bits(iter->seed)) {
-				pr_err("%s: ref tag error at location %llu " \
-				       "(rcvd %u)\n", iter->disk_name,
-				       (unsigned long long)
-				       iter->seed, be32_to_cpu(pi->ref_tag));
-				return BLK_STS_PROTECTION;
-			}
-		} else {
-			if (pi->app_tag == T10_PI_APP_ESCAPE &&
-			    pi->ref_tag == T10_PI_REF_ESCAPE)
-				goto next;
+static bool ext_pi_ref_escape(const u8 ref_tag[6])
+{
+	static const u8 ref_escape[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
+	return memcmp(ref_tag, ref_escape, sizeof(ref_escape)) == 0;
+}
+
+static blk_status_t blk_verify_ext_pi(struct blk_integrity_iter *iter,
+				      struct crc64_pi_tuple *pi)
+{
+	u64 seed = lower_48_bits(iter->seed);
+	u64 guard = get_unaligned_be64(&pi->guard_tag);
+	u64 ref = get_unaligned_be48(pi->ref_tag);
+	u16 app = get_unaligned_be16(&pi->app_tag);
+
+	if (iter->bi->flags & BLK_INTEGRITY_REF_TAG) {
+		if (app == APP_TAG_ESCAPE)
+			return BLK_STS_OK;
+		if (ref != seed) {
+			pr_err("%s: ref tag error at location %llu (rcvd %llu)\n",
+			       iter->bio->bi_bdev->bd_disk->disk_name, seed,
+			       ref);
+			return BLK_STS_PROTECTION;
 		}
+	} else if (app == APP_TAG_ESCAPE && ext_pi_ref_escape(pi->ref_tag)) {
+		return BLK_STS_OK;
+	}
+
+	if (guard != iter->csum) {
+		pr_err("%s: guard tag error at sector %llu (rcvd %016llx, want %016llx)\n",
+		       iter->bio->bi_bdev->bd_disk->disk_name, iter->seed,
+		       guard, iter->csum);
+		return BLK_STS_PROTECTION;
+	}

-		csum = t10_pi_csum(0, iter->data_buf, iter->interval,
-				   bi->csum_type);
-		if (offset)
-			csum = t10_pi_csum(csum, iter->prot_buf, offset,
-					   bi->csum_type);
-
-		if (pi->guard_tag != csum) {
-			pr_err("%s: guard tag error at sector %llu " \
-			       "(rcvd %04x, want %04x)\n", iter->disk_name,
-			       (unsigned long long)iter->seed,
-			       be16_to_cpu(pi->guard_tag), be16_to_cpu(csum));
+	return BLK_STS_OK;
+}
+
+static blk_status_t blk_verify_pi(struct blk_integrity_iter *iter,
+				  struct t10_pi_tuple *pi, u16 guard)
+{
+	u32 seed = lower_32_bits(iter->seed);
+	u32 ref = get_unaligned_be32(&pi->ref_tag);
+	u16 app = get_unaligned_be16(&pi->app_tag);
+
+	if (iter->bi->flags & BLK_INTEGRITY_REF_TAG) {
+		if (app == APP_TAG_ESCAPE)
+			return BLK_STS_OK;
+		if (ref != seed) {
+			pr_err("%s: ref tag error at location %u (rcvd %u)\n",
+			       iter->bio->bi_bdev->bd_disk->disk_name, seed,
+			       ref);
 			return BLK_STS_PROTECTION;
 		}
+	} else if (app == APP_TAG_ESCAPE && ref == REF_TAG_ESCAPE) {
+		return BLK_STS_OK;
+	}

-next:
-		iter->data_buf += iter->interval;
-		iter->prot_buf += bi->metadata_size;
-		iter->seed++;
+	if (guard != (u16)iter->csum) {
+		pr_err("%s: guard tag error at sector %llu (rcvd %04x, want %04x)\n",
+		       iter->bio->bi_bdev->bd_disk->disk_name, iter->seed,
+		       guard, (u16)iter->csum);
+		return BLK_STS_PROTECTION;
 	}

 	return BLK_STS_OK;
 }

-/**
- * t10_pi_type1_prepare - prepare PI prior submitting request to device
- * @rq: request with PI that should be prepared
- *
- * For Type 1/Type 2, the virtual start sector is the one that was
- * originally submitted by the block layer for the ref_tag usage. Due to
- * partitioning, MD/DM cloning, etc. the actual physical start sector is
- * likely to be different. Remap protection information to match the
- * physical LBA.
- */
-static void t10_pi_type1_prepare(struct request *rq)
+static blk_status_t blk_verify_t10_pi(struct blk_integrity_iter *iter,
+				      struct t10_pi_tuple *pi)
 {
-	struct blk_integrity *bi = &rq->q->limits.integrity;
-	const int tuple_sz = bi->metadata_size;
-	u32 ref_tag = t10_pi_ref_tag(rq);
-	u8 offset = bi->pi_offset;
-	struct bio *bio;
+	u16 guard = get_unaligned_be16(&pi->guard_tag);

-	__rq_for_each_bio(bio, rq) {
-		struct bio_integrity_payload *bip = bio_integrity(bio);
-		u32 virt = bip_get_seed(bip) & 0xffffffff;
-		struct bio_vec iv;
-		struct bvec_iter iter;
+	return blk_verify_pi(iter, pi, guard);
+}

-		/* Already remapped? */
-		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
-			break;
+static blk_status_t blk_verify_ip_pi(struct blk_integrity_iter *iter,
+				     struct t10_pi_tuple *pi)
+{
+	u16 guard = get_unaligned((u16 *)&pi->guard_tag);

-		bip_for_each_vec(iv, bip, iter) {
-			unsigned int j;
-			void *p;
-
-			p = bvec_kmap_local(&iv);
-			for (j = 0; j < iv.bv_len; j += tuple_sz) {
-				struct t10_pi_tuple *pi = p + offset;
-
-				if (be32_to_cpu(pi->ref_tag) == virt)
-					pi->ref_tag = cpu_to_be32(ref_tag);
-				virt++;
-				ref_tag++;
-				p += tuple_sz;
-			}
-			kunmap_local(p);
-		}
+	return blk_verify_pi(iter, pi, guard);
+}

-		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
+static blk_status_t blk_integrity_verify(struct blk_integrity_iter *iter,
+					 union pi_tuple *tuple)
+{
+	switch (iter->bi->csum_type) {
+	case BLK_INTEGRITY_CSUM_CRC64:
+		return blk_verify_ext_pi(iter, &tuple->crc64_pi);
+	case BLK_INTEGRITY_CSUM_CRC:
+		return blk_verify_t10_pi(iter, &tuple->t10_pi);
+	case BLK_INTEGRITY_CSUM_IP:
+		return blk_verify_ip_pi(iter, &tuple->t10_pi);
+	default:
+		return BLK_STS_OK;
 	}
 }

-/**
- * t10_pi_type1_complete - prepare PI prior returning request to the blk layer
- * @rq: request with PI that should be prepared
- * @nr_bytes: total bytes to prepare
- *
- * For Type 1/Type 2, the virtual start sector is the one that was
- * originally submitted by the block layer for the ref_tag usage. Due to
- * partitioning, MD/DM cloning, etc. the actual physical start sector is
- * likely to be different. Since the physical start sector was submitted
- * to the device, we should remap it back to virtual values expected by the
- * block layer.
- */
-static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
+static void blk_set_ext_pi(struct blk_integrity_iter *iter,
+			   struct crc64_pi_tuple *pi)
 {
-	struct blk_integrity *bi = &rq->q->limits.integrity;
-	unsigned intervals = nr_bytes >> bi->interval_exp;
-	const int tuple_sz = bi->metadata_size;
-	u32 ref_tag = t10_pi_ref_tag(rq);
-	u8 offset = bi->pi_offset;
-	struct bio *bio;
+	put_unaligned_be64(iter->csum, &pi->guard_tag);
+	put_unaligned_be16(0, &pi->app_tag);
+	put_unaligned_be48(iter->seed, &pi->ref_tag);
+}

-	__rq_for_each_bio(bio, rq) {
-		struct bio_integrity_payload *bip = bio_integrity(bio);
-		u32 virt = bip_get_seed(bip) & 0xffffffff;
-		struct bio_vec iv;
-		struct bvec_iter iter;
-
-		bip_for_each_vec(iv, bip, iter) {
-			unsigned int j;
-			void *p;
-
-			p = bvec_kmap_local(&iv);
-			for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
-				struct t10_pi_tuple *pi = p + offset;
-
-				if (be32_to_cpu(pi->ref_tag) == ref_tag)
-					pi->ref_tag = cpu_to_be32(virt);
-				virt++;
-				ref_tag++;
-				intervals--;
-				p += tuple_sz;
-			}
-			kunmap_local(p);
-		}
+static void blk_set_pi(struct blk_integrity_iter *iter,
+		       struct t10_pi_tuple *pi, __be16 csum)
+{
+	put_unaligned(csum, &pi->guard_tag);
+	put_unaligned_be16(0, &pi->app_tag);
+	put_unaligned_be32(iter->seed, &pi->ref_tag);
+}
+
+static void blk_set_t10_pi(struct blk_integrity_iter *iter,
+			   struct t10_pi_tuple *pi)
+{
+	blk_set_pi(iter, pi, cpu_to_be16((u16)iter->csum));
+}
+
+static void blk_set_ip_pi(struct blk_integrity_iter *iter,
+			  struct t10_pi_tuple *pi)
+{
+	blk_set_pi(iter, pi, (__force __be16)(u16)iter->csum);
+}
+
+static void blk_integrity_set(struct blk_integrity_iter *iter,
+			      union pi_tuple *tuple)
+{
+	switch (iter->bi->csum_type) {
+	case BLK_INTEGRITY_CSUM_CRC64:
+		return blk_set_ext_pi(iter, &tuple->crc64_pi);
+	case BLK_INTEGRITY_CSUM_CRC:
+		return blk_set_t10_pi(iter, &tuple->t10_pi);
+	case BLK_INTEGRITY_CSUM_IP:
+		return blk_set_ip_pi(iter, &tuple->t10_pi);
+	default:
+		WARN_ON_ONCE(1);
+		return;
 	}
 }

-static __be64 ext_pi_crc64(u64 crc, void *data, unsigned int len)
+static blk_status_t blk_integrity_interval(struct blk_integrity_iter *iter,
+					   bool verify)
 {
-	return cpu_to_be64(crc64_nvme(crc, data, len));
+	blk_status_t ret = BLK_STS_OK;
+	union pi_tuple tuple;
+	void *ptuple = &tuple;
+	struct bio_vec pbv;
+
+	blk_integrity_csum_offset(iter);
+	pbv = mp_bvec_iter_bvec(iter->bip->bip_vec, iter->prot_iter);
+	if (pbv.bv_len >= iter->bi->pi_tuple_size) {
+		ptuple = bvec_kmap_local(&pbv);
+		bvec_iter_advance_single(iter->bip->bip_vec, &iter->prot_iter,
+				iter->bi->metadata_size - iter->bi->pi_offset);
+	} else if (verify) {
+		blk_integrity_copy_to_tuple(iter->bip, &iter->prot_iter,
+					    ptuple, iter->bi->pi_tuple_size);
+	}
+
+	if (verify)
+		ret = blk_integrity_verify(iter, ptuple);
+	else
+		blk_integrity_set(iter, ptuple);
+
+	if (likely(ptuple != &tuple)) {
+		kunmap_local(ptuple);
+	} else if (!verify) {
+		blk_integrity_copy_from_tuple(iter->bip, &iter->prot_iter,
+					      ptuple, iter->bi->pi_tuple_size);
+	}
+
+	iter->interval_remaining = 1 << iter->bi->interval_exp;
+	iter->csum = 0;
+	iter->seed++;
+
+	return ret;
 }

-static void ext_pi_crc64_generate(struct blk_integrity_iter *iter,
-		struct blk_integrity *bi)
+static void blk_integrity_iterate(struct bio *bio, struct bvec_iter *data_iter,
+				  bool verify)
 {
-	u8 offset = bi->pi_offset;
-	unsigned int i;
+	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
+	struct bio_integrity_payload *bip = bio_integrity(bio);
+	struct blk_integrity_iter iter = {
+		.bio = bio,
+		.bip = bip,
+		.bi = bi,
+		.data_iter = *data_iter,
+		.prot_iter = bip->bip_iter,
+		.interval_remaining = 1 << bi->interval_exp,
+		.seed = data_iter->bi_sector,
+		.csum = 0,
+	};
+	blk_status_t ret = BLK_STS_OK;
+
+	while (iter.data_iter.bi_size && ret == BLK_STS_OK) {
+		struct bio_vec bv = mp_bvec_iter_bvec(iter.bio->bi_io_vec,
+						      iter.data_iter);
+		void *kaddr = bvec_kmap_local(&bv);
+		void *data = kaddr;
+		unsigned int len;
+
+		bvec_iter_advance_single(iter.bio->bi_io_vec, &iter.data_iter,
+					 bv.bv_len);
+		while (bv.bv_len && ret == BLK_STS_OK) {
+			len = min(iter.interval_remaining, bv.bv_len);
+			blk_calculate_guard(&iter, data, len);
+			bv.bv_len -= len;
+			data += len;
+			iter.interval_remaining -= len;
+			if (!iter.interval_remaining)
+				ret = blk_integrity_interval(&iter, verify);
+		}
+		kunmap_local(kaddr);
+	}
+
+	if (ret)
+		bio->bi_status = ret;
+}

-	for (i = 0 ; i < iter->data_size ; i += iter->interval) {
-		struct crc64_pi_tuple *pi = iter->prot_buf + offset;
+void blk_integrity_generate(struct bio *bio)
+{
+	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);

-		pi->guard_tag = ext_pi_crc64(0, iter->data_buf, iter->interval);
-		if (offset)
-			pi->guard_tag = ext_pi_crc64(be64_to_cpu(pi->guard_tag),
-					iter->prot_buf, offset);
-		pi->app_tag = 0;
+	switch (bi->csum_type) {
+	case BLK_INTEGRITY_CSUM_CRC64:
+	case BLK_INTEGRITY_CSUM_CRC:
+	case BLK_INTEGRITY_CSUM_IP:
+		blk_integrity_iterate(bio, &bio->bi_iter, false);
+		break;
+	default:
+		break;
+	}
+}

-		if (bi->flags & BLK_INTEGRITY_REF_TAG)
-			put_unaligned_be48(iter->seed, pi->ref_tag);
-		else
-			put_unaligned_be48(0ULL, pi->ref_tag);
+void blk_integrity_verify_iter(struct bio *bio, struct bvec_iter *saved_iter)
+{
+	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);

-		iter->data_buf += iter->interval;
-		iter->prot_buf += bi->metadata_size;
-		iter->seed++;
+	switch (bi->csum_type) {
+	case BLK_INTEGRITY_CSUM_CRC64:
+	case BLK_INTEGRITY_CSUM_CRC:
+	case BLK_INTEGRITY_CSUM_IP:
+		blk_integrity_iterate(bio, saved_iter, true);
+		break;
+	default:
+		break;
 	}
 }

-static bool ext_pi_ref_escape(const u8 ref_tag[6])
+/*
+ * Advance @iter past the protection offset for protection formats that
+ * contain front padding on the metadata region.
+ */
+static void blk_pi_advance_offset(struct blk_integrity *bi,
+				  struct bio_integrity_payload *bip,
+				  struct bvec_iter *iter)
 {
-	static const u8 ref_escape[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+	unsigned int offset = bi->pi_offset;

-	return memcmp(ref_tag, ref_escape, sizeof(ref_escape)) == 0;
+	while (offset > 0) {
+		struct bio_vec bv = mp_bvec_iter_bvec(bip->bip_vec, *iter);
+		unsigned int len = min(bv.bv_len, offset);
+
+		bvec_iter_advance_single(bip->bip_vec, iter, len);
+		offset -= len;
+	}
 }

-static blk_status_t ext_pi_crc64_verify(struct blk_integrity_iter *iter,
-		struct blk_integrity *bi)
-{
-	u8 offset = bi->pi_offset;
-	unsigned int i;
-
-	for (i = 0; i < iter->data_size; i += iter->interval) {
-		struct crc64_pi_tuple *pi = iter->prot_buf + offset;
-		u64 ref, seed;
-		__be64 csum;
-
-		if (bi->flags & BLK_INTEGRITY_REF_TAG) {
-			if (pi->app_tag == T10_PI_APP_ESCAPE)
-				goto next;
-
-			ref = get_unaligned_be48(pi->ref_tag);
-			seed = lower_48_bits(iter->seed);
-			if (ref != seed) {
-				pr_err("%s: ref tag error at location %llu (rcvd %llu)\n",
-					iter->disk_name, seed, ref);
-				return BLK_STS_PROTECTION;
-			}
-		} else {
-			if (pi->app_tag == T10_PI_APP_ESCAPE &&
-			    ext_pi_ref_escape(pi->ref_tag))
-				goto next;
-		}
+static void *blk_tuple_remap_begin(union pi_tuple *tuple,
+				   struct blk_integrity *bi,
+				   struct bio_integrity_payload *bip,
+				   struct bvec_iter *iter)
+{
+	struct bvec_iter titer;
+	struct bio_vec pbv;

-		csum = ext_pi_crc64(0, iter->data_buf, iter->interval);
-		if (offset)
-			csum = ext_pi_crc64(be64_to_cpu(csum), iter->prot_buf,
-					    offset);
+	blk_pi_advance_offset(bi, bip, iter);
+	pbv = mp_bvec_iter_bvec(bip->bip_vec, *iter);
+	if (likely(pbv.bv_len >= bi->pi_tuple_size))
+		return bvec_kmap_local(&pbv);

-		if (pi->guard_tag != csum) {
-			pr_err("%s: guard tag error at sector %llu " \
-			       "(rcvd %016llx, want %016llx)\n",
-				iter->disk_name, (unsigned long long)iter->seed,
-				be64_to_cpu(pi->guard_tag), be64_to_cpu(csum));
-			return BLK_STS_PROTECTION;
-		}
+	/*
+	 * We need to preserve the state of the original iter for the
+	 * copy_from_tuple at the end, so make a temp iter for here.
+	 */
+	titer = *iter;
+	blk_integrity_copy_to_tuple(bip, &titer, tuple, bi->pi_tuple_size);
+	return tuple;
+}

-next:
-		iter->data_buf += iter->interval;
-		iter->prot_buf += bi->metadata_size;
-		iter->seed++;
+static void blk_tuple_remap_end(union pi_tuple *tuple, void *ptuple,
+				struct blk_integrity *bi,
+				struct bio_integrity_payload *bip,
+				struct bvec_iter *iter)
+{
+	unsigned int len = bi->metadata_size - bi->pi_offset;
+
+	if (likely(ptuple != tuple)) {
+		kunmap_local(ptuple);
+	} else {
+		blk_integrity_copy_from_tuple(bip, iter, ptuple,
+					      bi->pi_tuple_size);
+		len -= bi->pi_tuple_size;
 	}

-	return BLK_STS_OK;
+	bvec_iter_advance(bip->bip_vec, iter, len);
 }

-static void ext_pi_type1_prepare(struct request *rq)
+static void blk_set_ext_unmap_ref(struct crc64_pi_tuple *pi, u64 virt,
+				  u64 ref_tag)
 {
-	struct blk_integrity *bi = &rq->q->limits.integrity;
-	const int tuple_sz = bi->metadata_size;
-	u64 ref_tag = ext_pi_ref_tag(rq);
-	u8 offset = bi->pi_offset;
-	struct bio *bio;
+	u64 ref = get_unaligned_be48(&pi->ref_tag);

-	__rq_for_each_bio(bio, rq) {
-		struct bio_integrity_payload *bip = bio_integrity(bio);
-		u64 virt = lower_48_bits(bip_get_seed(bip));
-		struct bio_vec iv;
-		struct bvec_iter iter;
+	if (ref == lower_48_bits(ref_tag) && ref != lower_48_bits(virt))
+		put_unaligned_be48(virt, pi->ref_tag);
+}

-		/* Already remapped? */
-		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
-			break;
+static void blk_set_t10_unmap_ref(struct t10_pi_tuple *pi, u32 virt,
+				  u32 ref_tag)
+{
+	u32 ref = get_unaligned_be32(&pi->ref_tag);

-		bip_for_each_vec(iv, bip, iter) {
-			unsigned int j;
-			void *p;
-
-			p = bvec_kmap_local(&iv);
-			for (j = 0; j < iv.bv_len; j += tuple_sz) {
-				struct crc64_pi_tuple *pi = p + offset;
-				u64 ref = get_unaligned_be48(pi->ref_tag);
-
-				if (ref == virt)
-					put_unaligned_be48(ref_tag, pi->ref_tag);
-				virt++;
-				ref_tag++;
-				p += tuple_sz;
-			}
-			kunmap_local(p);
-		}
+	if (ref == ref_tag && ref != virt)
+		put_unaligned_be32(virt, &pi->ref_tag);
+}

-		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
+static void blk_reftag_remap_complete(struct blk_integrity *bi,
+				      union pi_tuple *tuple, u64 virt, u64 ref)
+{
+	switch (bi->csum_type) {
+	case BLK_INTEGRITY_CSUM_CRC64:
+		blk_set_ext_unmap_ref(&tuple->crc64_pi, virt, ref);
+		break;
+	case BLK_INTEGRITY_CSUM_CRC:
+	case BLK_INTEGRITY_CSUM_IP:
+		blk_set_t10_unmap_ref(&tuple->t10_pi, virt, ref);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		break;
 	}
 }

-static void ext_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
+static void blk_set_ext_map_ref(struct crc64_pi_tuple *pi, u64 virt,
+				u64 ref_tag)
 {
-	struct blk_integrity *bi = &rq->q->limits.integrity;
-	unsigned intervals = nr_bytes >> bi->interval_exp;
-	const int tuple_sz = bi->metadata_size;
-	u64 ref_tag = ext_pi_ref_tag(rq);
-	u8 offset = bi->pi_offset;
-	struct bio *bio;
+	u64 ref = get_unaligned_be48(&pi->ref_tag);

-	__rq_for_each_bio(bio, rq) {
-		struct bio_integrity_payload *bip = bio_integrity(bio);
-		u64 virt = lower_48_bits(bip_get_seed(bip));
-		struct bio_vec iv;
-		struct bvec_iter iter;
-
-		bip_for_each_vec(iv, bip, iter) {
-			unsigned int j;
-			void *p;
-
-			p = bvec_kmap_local(&iv);
-			for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
-				struct crc64_pi_tuple *pi = p + offset;
-				u64 ref = get_unaligned_be48(pi->ref_tag);
-
-				if (ref == ref_tag)
-					put_unaligned_be48(virt, pi->ref_tag);
-				virt++;
-				ref_tag++;
-				intervals--;
-				p += tuple_sz;
-			}
-			kunmap_local(p);
-		}
-	}
+	if (ref == lower_48_bits(virt) && ref != ref_tag)
+		put_unaligned_be48(ref_tag, pi->ref_tag);
 }

-void blk_integrity_generate(struct bio *bio)
+static void blk_set_t10_map_ref(struct t10_pi_tuple *pi, u32 virt, u32 ref_tag)
 {
-	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
-	struct bio_integrity_payload *bip = bio_integrity(bio);
-	struct blk_integrity_iter iter;
-	struct bvec_iter bviter;
-	struct bio_vec bv;
-
-	iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
-	iter.interval = 1 << bi->interval_exp;
-	iter.seed = bio->bi_iter.bi_sector;
-	iter.prot_buf = bvec_virt(bip->bip_vec);
-	bio_for_each_segment(bv, bio, bviter) {
-		void *kaddr = bvec_kmap_local(&bv);
+	u32 ref = get_unaligned_be32(&pi->ref_tag);

-		iter.data_buf = kaddr;
-		iter.data_size = bv.bv_len;
-		switch (bi->csum_type) {
-		case BLK_INTEGRITY_CSUM_CRC64:
-			ext_pi_crc64_generate(&iter, bi);
-			break;
-		case BLK_INTEGRITY_CSUM_CRC:
-		case BLK_INTEGRITY_CSUM_IP:
-			t10_pi_generate(&iter, bi);
-			break;
-		default:
-			break;
-		}
-		kunmap_local(kaddr);
+	if (ref == virt && ref != ref_tag)
+		put_unaligned_be32(ref_tag, &pi->ref_tag);
+}
+
+static void blk_reftag_remap_prepare(struct blk_integrity *bi,
+				     union pi_tuple *tuple,
+				     u64 virt, u64 ref)
+{
+	switch (bi->csum_type) {
+	case BLK_INTEGRITY_CSUM_CRC64:
+		blk_set_ext_map_ref(&tuple->crc64_pi, virt, ref);
+		break;
+	case BLK_INTEGRITY_CSUM_CRC:
+	case BLK_INTEGRITY_CSUM_IP:
+		blk_set_t10_map_ref(&tuple->t10_pi, virt, ref);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		break;
 	}
 }

-void blk_integrity_verify_iter(struct bio *bio, struct bvec_iter *saved_iter)
+static void __blk_reftag_remap(struct bio *bio, struct blk_integrity *bi,
+			       unsigned *intervals, u64 *ref, bool prep)
 {
-	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
 	struct bio_integrity_payload *bip = bio_integrity(bio);
-	struct blk_integrity_iter iter;
-	struct bvec_iter bviter;
-	struct bio_vec bv;
+	struct bvec_iter iter = bip->bip_iter;
+	u64 virt = bip_get_seed(bip);
+	union pi_tuple tuple;
+	union pi_tuple *ptuple;

-	/*
-	 * At the moment verify is called bi_iter has been advanced during split
-	 * and completion, so use the copy created during submission here.
-	 */
-	iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
-	iter.interval = 1 << bi->interval_exp;
-	iter.seed = saved_iter->bi_sector;
-	iter.prot_buf = bvec_virt(bip->bip_vec);
-	__bio_for_each_segment(bv, bio, bviter, *saved_iter) {
-		void *kaddr = bvec_kmap_local(&bv);
-		blk_status_t ret = BLK_STS_OK;
+	if (prep && bip->bip_flags & BIP_MAPPED_INTEGRITY) {
+		*ref += bio->bi_iter.bi_size >> bi->interval_exp;
+		return;
+	}

-		iter.data_buf = kaddr;
-		iter.data_size = bv.bv_len;
-		switch (bi->csum_type) {
-		case BLK_INTEGRITY_CSUM_CRC64:
-			ret = ext_pi_crc64_verify(&iter, bi);
-			break;
-		case BLK_INTEGRITY_CSUM_CRC:
-		case BLK_INTEGRITY_CSUM_IP:
-			ret = t10_pi_verify(&iter, bi);
-			break;
-		default:
-			break;
-		}
-		kunmap_local(kaddr);
+	while (iter.bi_size && *intervals) {
+		ptuple = blk_tuple_remap_begin(&tuple, bi, bip, &iter);

-		if (ret) {
-			bio->bi_status = ret;
-			return;
-		}
+		if (prep)
+			blk_reftag_remap_prepare(bi, ptuple, virt, *ref);
+		else
+			blk_reftag_remap_complete(bi, ptuple, virt, *ref);
+
+		blk_tuple_remap_end(&tuple, ptuple, bi, bip, &iter);
+		(*intervals)--;
+		(*ref)++;
+		virt++;
 	}
+
+	if (prep)
+		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
 }

-void blk_integrity_prepare(struct request *rq)
+static void blk_integrity_remap(struct request *rq, unsigned int nr_bytes,
+				bool prep)
 {
 	struct blk_integrity *bi = &rq->q->limits.integrity;
+	u64 ref = blk_rq_pos(rq) >> (bi->interval_exp - SECTOR_SHIFT);
+	unsigned intervals = nr_bytes >> bi->interval_exp;
+	struct bio *bio;

 	if (!(bi->flags & BLK_INTEGRITY_REF_TAG))
 		return;

-	if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC64)
-		ext_pi_type1_prepare(rq);
-	else
-		t10_pi_type1_prepare(rq);
+	__rq_for_each_bio(bio, rq) {
+		__blk_reftag_remap(bio, bi, &intervals, &ref, prep);
+		if (!intervals)
+			break;
+	}
 }

-void blk_integrity_complete(struct request *rq, unsigned int nr_bytes)
+void blk_integrity_prepare(struct request *rq)
 {
-	struct blk_integrity *bi = &rq->q->limits.integrity;
-
-	if (!(bi->flags & BLK_INTEGRITY_REF_TAG))
-		return;
+	blk_integrity_remap(rq, blk_rq_bytes(rq), true);
+}

-	if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC64)
-		ext_pi_type1_complete(rq, nr_bytes);
-	else
-		t10_pi_type1_complete(rq, nr_bytes);
+void blk_integrity_complete(struct request *rq, unsigned int nr_bytes)
+{
+	blk_integrity_remap(rq, nr_bytes, false);
 }
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 4da937f96ae2e..7c371bda03806 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1874,6 +1874,7 @@ static bool nvme_init_integrity(struct nvme_ns_head *head,
 		break;
 	}

+	bi->split_interval_capable = true;
 	bi->metadata_size = head->ms;
 	if (bi->csum_type) {
 		bi->pi_tuple_size = head->pi_size;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2fff8a80dbd25..c21d7674fc24d 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -122,6 +122,7 @@ struct blk_integrity {
 	unsigned char			interval_exp;
 	unsigned char			tag_size;
 	unsigned char			pi_tuple_size;
+	bool				split_interval_capable;
 };

 typedef unsigned int __bitwise blk_mode_t;
-- 
2.47.3