
Commit 25ddf1f

Fix deletion of wrong data in abort MPU cond. scenarios
Add functional tests to cover scenarios where aborting an MPU could delete data from a different, completed MPU with the same object key. This prevents a regression of a bug where the wrong data was deleted because the uploadId was not being checked when looking for object metadata to clean up during an abort.

Issue: CLDSRV-668
1 parent 71f35c8 commit 25ddf1f
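
For context, a condensed sketch of the guard this commit tightens in lib/api/apiUtils/object/abortMultipartUpload.js (the condition is taken verbatim from the diff below; the comments and elided bodies are illustrative only). The abort path now only treats existing object metadata as cleanup-eligible when that metadata belongs to the upload being aborted, so a completed MPU that reused the same object key keeps its data:

    // Before: any object metadata found under the key was eligible for cleanup on abort.
    if (objectMD?.location) {
        // ... collect locations to delete ...
    }

    // After: only clean up metadata written by the upload being aborted.
    if (objectMD && objectMD.location
        && objectMD.uploadId === metadataValMPUparams.uploadId) {
        // ... collect locations to delete ...
    }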

2 files changed: 196 additions & 1 deletion

lib/api/apiUtils/object/abortMultipartUpload.js

Lines changed: 1 addition & 1 deletion
@@ -126,7 +126,7 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
             return next(null, mpuBucket, storedParts, destBucket);
         }

-        if (objectMD?.location) {
+        if (objectMD && objectMD.location && objectMD.uploadId === metadataValMPUparams.uploadId) {
             const existingLocations = new Set(locations.map(loc => loc.key));
             const remainingObjectLocations = objectMD.location.filter(loc => !existingLocations.has(loc.key));
             locations.push(...remainingObjectLocations);

tests/functional/aws-node-sdk/test/object/abortMPU.js

Lines changed: 195 additions & 0 deletions
@@ -2,6 +2,7 @@ const assert = require('assert');
 const { v4: uuidv4 } = require('uuid');
 const withV4 = require('../support/withV4');
 const BucketUtility = require('../../lib/utility/bucket-util');
+const async = require('async');

 const date = Date.now();
 const bucket = `abortmpu${date}`;
@@ -64,6 +65,200 @@ describe('Abort MPU', () => {
     });
 });

+describe('Abort MPU with existing object', function AbortMPUExistingObject() {
+    this.timeout(60000);
+
+    withV4(sigCfg => {
+        let bucketUtil;
+        let s3;
+        const bucketName = `abortmpu-test-bucket-${Date.now()}`;
+        const objectKey = 'my-object';
+
+        beforeEach(done => {
+            bucketUtil = new BucketUtility('default', sigCfg);
+            s3 = bucketUtil.s3;
+            s3.createBucket({ Bucket: bucketName }, err => {
+                assert.ifError(err, `Error creating bucket: ${err}`);
+                done();
+            });
+        });
+
+        afterEach(async () => {
+            const data = await s3.listMultipartUploads({ Bucket: bucketName }).promise();
+            const uploads = data.Uploads;
+            await Promise.all(uploads.map(async upload => {
+                try {
+                    await s3.abortMultipartUpload({
+                        Bucket: bucketName,
+                        Key: upload.Key,
+                        UploadId: upload.UploadId,
+                    }).promise();
+                } catch (err) {
+                    if (err.code !== 'NoSuchUpload') {
+                        throw err;
+                    }
+                    // If NoSuchUpload, swallow error
+                }
+            }));
+            await bucketUtil.empty(bucketName);
+            await bucketUtil.deleteOne(bucketName);
+        });
+
+        it('should not delete existing object data when aborting another MPU for same key', done => {
+            const part1 = Buffer.from('I am part 1 of MPU 1');
+            const part2 = Buffer.from('I am part 1 of MPU 2');
+            let uploadId1;
+            let uploadId2;
+            let etag1;
+            async.waterfall([
+                next => {
+                    s3.createMultipartUpload({ Bucket: bucketName, Key: objectKey }, (err, data) => {
+                        assert.ifError(err, `error creating MPU 1: ${err}`);
+                        uploadId1 = data.UploadId;
+                        s3.uploadPart({
+                            Bucket: bucketName,
+                            Key: objectKey,
+                            PartNumber: 1,
+                            UploadId: uploadId1,
+                            Body: part1,
+                        }, (err, data) => {
+                            assert.ifError(err, `error uploading part for MPU 1: ${err}`);
+                            etag1 = data.ETag;
+                            s3.completeMultipartUpload({
+                                Bucket: bucketName,
+                                Key: objectKey,
+                                UploadId: uploadId1,
+                                MultipartUpload: { Parts: [{ ETag: etag1, PartNumber: 1 }] },
+                            }, err => {
+                                assert.ifError(err, `error completing MPU 1: ${err}`);
+                                next();
+                            });
+                        });
+                    });
+                },
+                next => {
+                    s3.getObject({ Bucket: bucketName, Key: objectKey }, (err, data) => {
+                        assert.ifError(err, `error getting object after MPU 1: ${err}`);
+                        assert.strictEqual(data.Body.toString(), part1.toString());
+                        next();
+                    });
+                },
+                next => {
+                    s3.createMultipartUpload({ Bucket: bucketName, Key: objectKey }, (err, data) => {
+                        assert.ifError(err, `error creating MPU 2: ${err}`);
+                        uploadId2 = data.UploadId;
+                        s3.uploadPart({
+                            Bucket: bucketName,
+                            Key: objectKey,
+                            PartNumber: 1,
+                            UploadId: uploadId2,
+                            Body: part2,
+                        }, err => {
+                            assert.ifError(err, `error uploading part for MPU 2: ${err}`);
+                            next();
+                        });
+                    });
+                },
+                next => {
+                    s3.abortMultipartUpload({ Bucket: bucketName, Key: objectKey, UploadId: uploadId2 }, err => {
+                        assert.ifError(err, `error aborting MPU 2: ${err}`);
+                        next();
+                    });
+                },
+                next => {
+                    s3.getObject({ Bucket: bucketName, Key: objectKey }, (err, data) => {
+                        assert.ifError(err, `error getting object after aborting MPU 2: ${err}`);
+                        assert.strictEqual(data.Body.toString(), part1.toString());
+                        next();
+                    });
+                },
+            ], done);
+        });
+
+        it('should not delete existing object data when aborting an old MPU for same key', done => {
+            const part1 = Buffer.from('I am part 1 of MPU 1');
+            const part2 = Buffer.from('I am part 1 of MPU 2');
+            let uploadId1;
+            let uploadId2;
+            let etag2;
+            async.waterfall([
+                next => {
+                    s3.createMultipartUpload({
+                        Bucket: bucketName, Key: objectKey,
+                    }, (err, data) => {
+                        assert.ifError(err, `error creating MPU 1: ${err}`);
+                        uploadId1 = data.UploadId;
+                        s3.uploadPart({
+                            Bucket: bucketName,
+                            Key: objectKey,
+                            PartNumber: 1,
+                            UploadId: uploadId1,
+                            Body: part1,
+                        }, err => {
+                            assert.ifError(err, `error uploading part for MPU 1: ${err}`);
+                            next();
+                        });
+                    });
+                },
+                next => {
+                    s3.createMultipartUpload({
+                        Bucket: bucketName, Key: objectKey,
+                    }, (err, data) => {
+                        assert.ifError(err, `error creating MPU 2: ${err}`);
+                        uploadId2 = data.UploadId;
+                        s3.uploadPart({
+                            Bucket: bucketName,
+                            Key: objectKey,
+                            PartNumber: 1,
+                            UploadId: uploadId2,
+                            Body: part2,
+                        }, (err, data) => {
+                            assert.ifError(err, `error uploading part for MPU 2: ${err}`);
+                            etag2 = data.ETag;
+                            s3.completeMultipartUpload({
+                                Bucket: bucketName,
+                                Key: objectKey,
+                                UploadId: uploadId2,
+                                MultipartUpload: { Parts: [{ ETag: etag2, PartNumber: 1 }] },
+                            }, err => {
+                                assert.ifError(err, `error completing MPU 2: ${err}`);
+                                next();
+                            });
+                        });
+                    });
+                },
+                next => {
+                    s3.getObject({
+                        Bucket: bucketName,
+                        Key: objectKey,
+                    }, (err, data) => {
+                        assert.ifError(err, `error getting object after MPU 2: ${err}`);
+                        assert.strictEqual(data.Body.toString(), part2.toString());
+                        next();
+                    });
+                },
+                next => {
+                    s3.abortMultipartUpload({
+                        Bucket: bucketName,
+                        Key: objectKey,
+                        UploadId: uploadId1,
+                    }, err => {
+                        assert.ifError(err, `error aborting MPU 1: ${err}`);
+                        next();
+                    });
+                },
+                next => {
+                    s3.getObject({ Bucket: bucketName, Key: objectKey }, (err, data) => {
+                        assert.ifError(err, `error getting object after aborting MPU 1: ${err}`);
+                        assert.strictEqual(data.Body.toString(), part2.toString());
+                        next();
+                    });
+                },
+            ], done);
+        });
+    });
+});
+
 describe('Abort MPU - No Such Upload', () => {
     withV4(sigCfg => {
         let bucketUtil;
