@@ -293,12 +293,11 @@ dump_write(dmu_sendarg_t *dsp, dmu_object_type_t type, uint64_t object,
		ASSERT(BP_IS_PROTECTED(bp));

		/*
-		 * This is a raw protected block so we set the encrypted
-		 * flag. We need to pass along everything the receiving
-		 * side will need to interpret this block, including the
-		 * byteswap, salt, IV, and MAC.
+		 * This is a raw protected block so we need to pass
+		 * along everything the receiving side will need to
+		 * interpret this block, including the byteswap, salt,
+		 * IV, and MAC.
		 */
-		drrw->drr_flags |= DRR_RAW_ENCRYPTED;
		if (BP_SHOULD_BYTESWAP(bp))
			drrw->drr_flags |= DRR_RAW_BYTESWAP;
		zio_crypt_decode_params_bp(bp, drrw->drr_salt,
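The deleted `drrw->drr_flags |= DRR_RAW_ENCRYPTED;` is the theme of the whole commit: whether a stream is raw is a stream-wide property already carried by the `DMU_BACKUP_FEATURE_RAW` feature flag in the BEGIN record, so stamping it on every record is redundant and lets the two sources of truth disagree. A minimal standalone sketch of the stream-level check (the flag values and helper below are illustrative, not the real zfs_ioctl.h definitions):

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the stream feature flags (not real values). */
#define	FEATURE_DEDUP	(1ULL << 0)
#define	FEATURE_RAW	(1ULL << 24)

/* One stream-wide answer replaces a per-record DRR_RAW_ENCRYPTED bit. */
static int
stream_is_raw(uint64_t featureflags)
{
	return ((featureflags & FEATURE_RAW) != 0);
}

int
main(void)
{
	uint64_t featureflags = FEATURE_RAW | FEATURE_DEDUP;

	printf("raw stream: %d\n", stream_is_raw(featureflags));
	return (0);
}
```

The receive path caches this answer once in `ra.raw`/`rwa.raw` (see the struct hunks below) instead of re-testing every record's flags.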
@@ -401,9 +400,9 @@ dump_spill(dmu_sendarg_t *dsp, const blkptr_t *bp, uint64_t object, void *data)
	drrs->drr_toguid = dsp->dsa_toguid;

	/* handle raw send fields */
-	if ((dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) != 0 &&
-	    BP_IS_PROTECTED(bp)) {
-		drrs->drr_flags |= DRR_RAW_ENCRYPTED;
+	if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) {
+		ASSERT(BP_IS_PROTECTED(bp));
+
		if (BP_SHOULD_BYTESWAP(bp))
			drrs->drr_flags |= DRR_RAW_BYTESWAP;
		drrs->drr_compressiontype = BP_GET_COMPRESS(bp);
@@ -508,9 +507,9 @@ dump_dnode(dmu_sendarg_t *dsp, const blkptr_t *bp, uint64_t object,
	    drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
		drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;

-	if ((dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) &&
-	    BP_IS_PROTECTED(bp)) {
-		drro->drr_flags |= DRR_RAW_ENCRYPTED;
+	if ((dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW)) {
+		ASSERT(BP_IS_ENCRYPTED(bp));
+
		if (BP_SHOULD_BYTESWAP(bp))
			drro->drr_flags |= DRR_RAW_BYTESWAP;

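In `dump_spill()` and `dump_dnode()` the `BP_IS_PROTECTED()` test moves out of the condition and becomes an assertion: on a raw send every block these paths emit must already be protected, so an unprotected block is now treated as a bug rather than a case to silently skip. Note that the dnode path asserts the stricter `BP_IS_ENCRYPTED()`; in ZFS a protected block pointer is either encrypted or merely authenticated (MAC'd plaintext). A hedged sketch of that distinction, using a simplified two-bit model rather than the real blkptr macros:

```c
#include <assert.h>
#include <stdbool.h>

/* Illustrative model: a protected block is encrypted or authenticated. */
struct blkptr_model {
	bool encrypted;		/* payload is ciphertext */
	bool authenticated;	/* plaintext, but covered by a MAC */
};

static bool
is_protected(const struct blkptr_model *bp)
{
	return (bp->encrypted || bp->authenticated);
}

static void
dump_record(const struct blkptr_model *bp, bool raw_send)
{
	if (raw_send) {
		/*
		 * Before: "if (raw && is_protected(bp))" silently skipped
		 * unprotected blocks.  After: raw send implies the block
		 * is protected, so a violation trips an assertion.
		 */
		assert(is_protected(bp));
		/* ... fill in the salt/IV/MAC fields here ... */
	}
}

int
main(void)
{
	struct blkptr_model bp = { .encrypted = true };

	dump_record(&bp, true);
	return (0);
}
```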
@@ -567,7 +566,6 @@ dump_object_range(dmu_sendarg_t *dsp, const blkptr_t *bp, uint64_t firstobj,
	drror->drr_firstobj = firstobj;
	drror->drr_numslots = numslots;
	drror->drr_toguid = dsp->dsa_toguid;
-	drror->drr_flags |= DRR_RAW_ENCRYPTED;
	if (BP_SHOULD_BYTESWAP(bp))
		drror->drr_flags |= DRR_RAW_BYTESWAP;
	zio_crypt_decode_params_bp(bp, drror->drr_salt, drror->drr_iv);
@@ -1684,15 +1682,13 @@ dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
			return (error);
		}
		if (!origin->ds_is_snapshot) {
-			dsl_dataset_rele_flags(origin,
-			    DS_HOLD_FLAG_DECRYPT, FTAG);
+			dsl_dataset_rele_flags(origin, dsflags, FTAG);
			dsl_dataset_rele_flags(ds, dsflags, FTAG);
			return (SET_ERROR(EINVAL));
		}
		if (dsl_dataset_phys(origin)->ds_guid != fromguid &&
		    fromguid != 0) {
-			dsl_dataset_rele_flags(origin,
-			    DS_HOLD_FLAG_DECRYPT, FTAG);
+			dsl_dataset_rele_flags(origin, dsflags, FTAG);
			dsl_dataset_rele_flags(ds, dsflags, FTAG);
			return (SET_ERROR(ENODEV));
		}
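The `dmu_recv_begin_check()` fix is about hold/release symmetry: the origin dataset is held with the receive's computed `dsflags`, but these two error paths released it with a hard-coded `DS_HOLD_FLAG_DECRYPT`. When the flags disagree (e.g. a raw receive that never took a key hold), the key reference count is corrupted. A minimal sketch of the invariant, with hypothetical stand-ins for the dataset hold API:

```c
#include <assert.h>

/* Hypothetical resource whose key refcount moves only when FLAG_DECRYPT set. */
#define	FLAG_NONE	0x0
#define	FLAG_DECRYPT	0x1

struct dataset_model {
	int holds;
	int key_refs;
};

static void
hold_flags(struct dataset_model *ds, int flags)
{
	ds->holds++;
	if (flags & FLAG_DECRYPT)
		ds->key_refs++;
}

static void
rele_flags(struct dataset_model *ds, int flags)
{
	ds->holds--;
	if (flags & FLAG_DECRYPT)
		ds->key_refs--;
}

int
main(void)
{
	struct dataset_model origin = { 0 };
	int dsflags = FLAG_NONE;	/* e.g. a raw receive: no key hold */

	hold_flags(&origin, dsflags);
	/* Releasing with FLAG_DECRYPT here would drop a key ref never taken. */
	rele_flags(&origin, dsflags);
	assert(origin.holds == 0 && origin.key_refs == 0);
	return (0);
}
```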
@@ -2081,6 +2077,7 @@ struct receive_writer_arg {
	/* A map from guid to dataset to help handle dedup'd streams. */
	avl_tree_t *guid_to_ds_map;
	boolean_t resumable;
+	boolean_t raw;
	uint64_t last_object, last_offset;
	uint64_t bytes_read; /* bytes read when current record created */
};
@@ -2115,6 +2112,7 @@ struct receive_arg {
	zio_cksum_t prev_cksum;
	int err;
	boolean_t byteswap;
+	boolean_t raw;
	uint64_t featureflags;
	/* Sorted list of objects not to issue prefetches for. */
	struct objlist ignore_objlist;
@@ -2359,7 +2357,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
		return (SET_ERROR(EINVAL));
	}

-	if (DRR_IS_RAW_ENCRYPTED(drro->drr_flags)) {
+	if (rwa->raw) {
		if (drro->drr_raw_bonuslen < drro->drr_bonuslen ||
		    drro->drr_indblkshift > SPA_MAXBLOCKSHIFT ||
		    drro->drr_nlevels > DN_MAX_LEVELS ||
@@ -2394,13 +2392,12 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
		    drro->drr_bonuslen);

		/* nblkptr will be bounded by the bonus size and type */
-		if (DRR_IS_RAW_ENCRYPTED(drro->drr_flags) &&
-		    nblkptr != drro->drr_nblkptr)
+		if (rwa->raw && nblkptr != drro->drr_nblkptr)
			return (SET_ERROR(EINVAL));

		if (drro->drr_blksz != doi.doi_data_block_size ||
		    nblkptr < doi.doi_nblkptr ||
-		    (DRR_IS_RAW_ENCRYPTED(drro->drr_flags) &&
+		    (rwa->raw &&
		    (indblksz != doi.doi_metadata_block_size ||
		    drro->drr_nlevels < doi.doi_indirection))) {
			err = dmu_free_long_range(rwa->os, drro->drr_object,
@@ -2438,13 +2435,16 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
			return (SET_ERROR(EINVAL));
	}

+	if (rwa->raw)
+		VERIFY0(dmu_object_dirty_raw(rwa->os, drro->drr_object, tx));
+
	dmu_object_set_checksum(rwa->os, drro->drr_object,
	    drro->drr_checksumtype, tx);
	dmu_object_set_compress(rwa->os, drro->drr_object,
	    drro->drr_compress, tx);

	/* handle more restrictive dnode structuring for raw recvs */
-	if (DRR_IS_RAW_ENCRYPTED(drro->drr_flags)) {
+	if (rwa->raw) {
		/*
		 * Set the indirect block shift and nlevels. This will not fail
		 * because we ensured all of the blocks were free earlier if
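`receive_object()` (and, in the hunks below, `receive_write()`, `receive_write_byref()`, and `receive_spill()`) now calls `dmu_object_dirty_raw()` whenever the stream is raw, marking the object's dirty records so the DMU writes the incoming ciphertext verbatim instead of re-encrypting it at sync time. A sketch of the recurring guard pattern, with a simplified stand-in for the DMU call:

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define	VERIFY0(x)	assert((x) == 0)

struct os_model { int dummy; };
struct tx_model { int dummy; };

/*
 * Stand-in for dmu_object_dirty_raw(): flags the object's dirty records
 * so the write path skips the encryption/compression transforms.
 */
static int
object_dirty_raw(struct os_model *os, uint64_t object, struct tx_model *tx)
{
	(void) os; (void) object; (void) tx;
	return (0);	/* 0 == success */
}

static void
receive_modify(struct os_model *os, uint64_t object, struct tx_model *tx,
    bool raw)
{
	/* The patch inserts this guard before every raw-capable write. */
	if (raw)
		VERIFY0(object_dirty_raw(os, object, tx));
	/* ... perform the write itself ... */
}

int
main(void)
{
	struct os_model os;
	struct tx_model tx;

	receive_modify(&os, 1, &tx, true);
	return (0);
}
```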
@@ -2460,7 +2460,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
		dmu_buf_t *db;
		uint32_t flags = DMU_READ_NO_PREFETCH;

-		if (DRR_IS_RAW_ENCRYPTED(drro->drr_flags))
+		if (rwa->raw)
			flags |= DMU_READ_NO_DECRYPT;

		VERIFY0(dmu_bonus_hold_impl(rwa->os, drro->drr_object,
@@ -2474,7 +2474,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
		 * Raw bonus buffers have their byteorder determined by the
		 * DRR_OBJECT_RANGE record.
		 */
-		if (rwa->byteswap && !DRR_IS_RAW_ENCRYPTED(drro->drr_flags)) {
+		if (rwa->byteswap && !rwa->raw) {
			dmu_object_byteswap_t byteswap =
			    DMU_OT_BYTESWAP(drro->drr_bonustype);
			dmu_ot_byteswap[byteswap].ob_func(db->db_data,
@@ -2550,6 +2550,10 @@ receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
		dmu_tx_abort(tx);
		return (err);
	}
+
+	if (rwa->raw)
+		VERIFY0(dmu_object_dirty_raw(rwa->os, drrw->drr_object, tx));
+
	if (rwa->byteswap && !arc_is_encrypted(abuf) &&
	    arc_get_compression(abuf) == ZIO_COMPRESS_OFF) {
		dmu_object_byteswap_t byteswap =
@@ -2616,9 +2620,8 @@ receive_write_byref(struct receive_writer_arg *rwa,
		ref_os = rwa->os;
	}

-	if (DRR_IS_RAW_ENCRYPTED(drrwbr->drr_flags)) {
+	if (rwa->raw)
		flags |= DMU_READ_NO_DECRYPT;
-	}

	/* may return either a regular db or an encrypted one */
	err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
@@ -2636,7 +2639,8 @@ receive_write_byref(struct receive_writer_arg *rwa,
		return (err);
	}

-	if (DRR_IS_RAW_ENCRYPTED(drrwbr->drr_flags)) {
+	if (rwa->raw) {
+		VERIFY0(dmu_object_dirty_raw(rwa->os, drrwbr->drr_object, tx));
		dmu_copy_from_buf(rwa->os, drrwbr->drr_object,
		    drrwbr->drr_offset, dbp, tx);
	} else {
@@ -2702,7 +2706,7 @@ receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
	    drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
		return (SET_ERROR(EINVAL));

-	if (DRR_IS_RAW_ENCRYPTED(drrs->drr_flags)) {
+	if (rwa->raw) {
		if (!DMU_OT_IS_VALID(drrs->drr_type) ||
		    drrs->drr_compressiontype >= ZIO_COMPRESS_FUNCTIONS ||
		    drrs->drr_compressed_size == 0)
@@ -2730,6 +2734,8 @@ receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
		return (err);
	}
	dmu_buf_will_dirty(db_spill, tx);
+	if (rwa->raw)
+		VERIFY0(dmu_object_dirty_raw(rwa->os, drrs->drr_object, tx));

	if (db_spill->db_size < drrs->drr_length)
		VERIFY(0 == dbuf_spill_set_blksz(db_spill,
@@ -2795,7 +2801,7 @@ receive_object_range(struct receive_writer_arg *rwa,
	 */
	if (drror->drr_numslots != DNODES_PER_BLOCK ||
	    P2PHASE(drror->drr_firstobj, DNODES_PER_BLOCK) != 0 ||
-	    !DRR_IS_RAW_ENCRYPTED(drror->drr_flags))
+	    !rwa->raw)
		return (SET_ERROR(EINVAL));

	offset = drror->drr_firstobj * sizeof (dnode_phys_t);
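The `P2PHASE(drror->drr_firstobj, DNODES_PER_BLOCK)` test requires a raw DRR_OBJECT_RANGE record to start on a metadnode-block boundary, since the record carries crypto parameters for exactly one block of dnodes. For a power-of-two alignment, `P2PHASE(x, align)` reduces to `x & (align - 1)`, i.e. the offset of `x` within its block. A quick demonstration (the `DNODES_PER_BLOCK` value here is illustrative):

```c
#include <assert.h>
#include <stdint.h>

/* P2PHASE as in sys/sysmacros.h; valid for power-of-two align. */
#define	P2PHASE(x, align)	((x) & ((align) - 1))

#define	DNODES_PER_BLOCK	32	/* illustrative value */

int
main(void)
{
	/* 64 is a multiple of 32: phase 0, the range is block-aligned. */
	assert(P2PHASE((uint64_t)64, DNODES_PER_BLOCK) == 0);

	/* 65 starts mid-block: phase 1, so the record is rejected. */
	assert(P2PHASE((uint64_t)65, DNODES_PER_BLOCK) == 1);
	return (0);
}
```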
@@ -3075,7 +3081,7 @@ receive_read_record(struct receive_arg *ra)
		arc_buf_t *abuf;
		boolean_t is_meta = DMU_OT_IS_METADATA(drrw->drr_type);

-		if (DRR_IS_RAW_ENCRYPTED(drrw->drr_flags)) {
+		if (ra->raw) {
			boolean_t byteorder = ZFS_HOST_BYTEORDER ^
			    !!DRR_IS_RAW_BYTESWAPPED(drrw->drr_flags) ^
			    ra->byteswap;
@@ -3159,7 +3165,7 @@ receive_read_record(struct receive_arg *ra)
		int len = DRR_SPILL_PAYLOAD_SIZE(drrs);

		/* DRR_SPILL records are either raw or uncompressed */
-		if (DRR_IS_RAW_ENCRYPTED(drrs->drr_flags)) {
+		if (ra->raw) {
			boolean_t byteorder = ZFS_HOST_BYTEORDER ^
			    !!DRR_IS_RAW_BYTESWAPPED(drrs->drr_flags) ^
			    ra->byteswap;
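The three-way XOR that computes `byteorder` is worth unpacking: one flip if the block was stored byteswapped on the sender (`DRR_IS_RAW_BYTESWAPPED`), one flip if the stream itself arrived opposite-endian to this host (`ra->byteswap`), folded against the host's native order. Since each swap toggles endianness, an even number of flips cancels out. A standalone parity check of that reasoning:

```c
#include <assert.h>
#include <stdbool.h>

/*
 * Parity sketch: each "swap" flips endianness, so the buffer's final
 * byteorder relative to this host is the XOR of all the flips.
 */
static bool
buffer_matches_host(bool sender_stored_swapped, bool stream_opposite_host)
{
	/* Two flips cancel; an odd number leaves the buffer swapped. */
	return (!(sender_stored_swapped ^ stream_opposite_host));
}

int
main(void)
{
	/*
	 * Swapped on the sender AND the stream is opposite-endian to us:
	 * the two flips cancel and the buffer is already native.
	 */
	assert(buffer_matches_host(true, true));

	/* Exactly one flip: the buffer is byteswapped relative to us. */
	assert(!buffer_matches_host(true, false));
	assert(!buffer_matches_host(false, true));
	return (0);
}
```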
@@ -3360,6 +3366,7 @@ dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
	nvlist_t *begin_nvl = NULL;

	ra.byteswap = drc->drc_byteswap;
+	ra.raw = drc->drc_raw;
	ra.cksum = drc->drc_cksum;
	ra.vp = vp;
	ra.voff = *voffp;
@@ -3387,16 +3394,23 @@ dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
	featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
	ra.featureflags = featureflags;

+	/* embedded data is incompatible with encrypted datasets */
+	if (ra.os->os_encrypted &&
+	    (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
+		err = SET_ERROR(EINVAL);
+		goto out;
+	}
+
	/* if this stream is dedup'ed, set up the avl tree for guid mapping */
	if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
		minor_t minor;

		if (cleanup_fd == -1) {
-			ra.err = SET_ERROR(EBADF);
+			err = SET_ERROR(EBADF);
			goto out;
		}
-		ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
-		if (ra.err != 0) {
+		err = zfs_onexit_fd_hold(cleanup_fd, &minor);
+		if (err != 0) {
			cleanup_fd = -1;
			goto out;
		}
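Besides the new embedded-data check, this hunk quietly fixes a real bug: the dedup setup stored its failures in `ra.err`, while the `out:` path decides success from the local `err`, so `EBADF` and `zfs_onexit_fd_hold()` failures were dropped on the floor. The anti-pattern in miniature:

```c
#include <stdio.h>

struct receive_arg_model { int err; };

int
main(void)
{
	struct receive_arg_model ra = { 0 };
	int err = 0;

	ra.err = -1;	/* before the fix: failure recorded here... */

	/* ...but the cleanup path only ever consulted 'err'. */
	if (err != 0)
		printf("failure propagated\n");
	else
		printf("failure silently dropped\n");	/* what happened */
	return (0);
}
```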
@@ -3410,12 +3424,12 @@ dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
			err = zfs_onexit_add_cb(minor,
			    free_guid_map_onexit, rwa.guid_to_ds_map,
			    action_handlep);
-			if (ra.err != 0)
+			if (err != 0)
				goto out;
		} else {
			err = zfs_onexit_cb_data(minor, *action_handlep,
			    (void **)&rwa.guid_to_ds_map);
-			if (ra.err != 0)
+			if (err != 0)
				goto out;
		}
@@ -3471,6 +3485,7 @@ dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
	rwa.os = ra.os;
	rwa.byteswap = drc->drc_byteswap;
	rwa.resumable = drc->drc_resumable;
+	rwa.raw = drc->drc_raw;

	(void) thread_create(NULL, 0, receive_writer_thread, &rwa, 0, curproc,
	    TS_RUN, minclsyspri);