diff --git a/generator/.DevConfigs/252dad9f-d2a9-4d49-bff8-000924f0add4.json b/generator/.DevConfigs/252dad9f-d2a9-4d49-bff8-000924f0add4.json new file mode 100644 index 000000000000..3c1fff65ffab --- /dev/null +++ b/generator/.DevConfigs/252dad9f-d2a9-4d49-bff8-000924f0add4.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "minor", + "changeLogMessages": [ + "Add GetObjectResponse to TransferUtilityDownloadResponse mapping." + ] + } + ] +} \ No newline at end of file diff --git a/generator/.DevConfigs/77d980ad-8f58-4f2e-97f8-d2c8c5ba3732.json b/generator/.DevConfigs/77d980ad-8f58-4f2e-97f8-d2c8c5ba3732.json new file mode 100644 index 000000000000..be509aae4368 --- /dev/null +++ b/generator/.DevConfigs/77d980ad-8f58-4f2e-97f8-d2c8c5ba3732.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "minor", + "changeLogMessages": [ + "Create new UploadWithResponse API that returns response metadata information for transfer utility." + ] + } + ] +} diff --git a/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae75.json b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae75.json new file mode 100644 index 000000000000..999c11e35b3e --- --- /dev/null +++ b/generator/.DevConfigs/c49077d9-90b3-437f-b316-6d8d8833ae75.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "patch", + "changeLogMessages": [ + "Remove AmazonWebServiceResponse as base class for transfer utility response objects." 
+ ] + } + ] +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/AbortMultipartUploadsCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/AbortMultipartUploadsCommand.cs index 3e3ca44376df..1f7995f86cd6 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/AbortMultipartUploadsCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/AbortMultipartUploadsCommand.cs @@ -28,7 +28,7 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class AbortMultipartUploadsCommand : BaseCommand + internal partial class AbortMultipartUploadsCommand : BaseCommand { IAmazonS3 _s3Client; string _bucketName; diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/BaseCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/BaseCommand.cs index 428758fa54e6..7f4f7c6030b0 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/BaseCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/BaseCommand.cs @@ -30,14 +30,13 @@ namespace Amazon.S3.Transfer.Internal { - internal abstract partial class BaseCommand + /// + /// Generic base command that returns a typed response + /// + /// Type of response returned by the command + internal abstract partial class BaseCommand where TResponse : class { - public virtual object Return - { - get { return null; } - } - - internal GetObjectRequest ConvertToGetObjectRequest(BaseDownloadRequest request) + protected GetObjectRequest ConvertToGetObjectRequest(BaseDownloadRequest request) { GetObjectRequest getRequest = new GetObjectRequest() { diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadCommand.cs index 6359704fc0cd..f8e45d7b20fe 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadCommand.cs @@ -33,7 +33,7 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class DownloadCommand : BaseCommand + internal partial class DownloadCommand : 
BaseCommand { static int MAX_BACKOFF_IN_MILLISECONDS = (int)TimeSpan.FromSeconds(30).TotalMilliseconds; @@ -176,4 +176,3 @@ static ByteRange ByteRangeRemainingForDownload(string filepath) } } } - diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs index 0140554ded39..5058960d9a06 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs @@ -33,7 +33,7 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class DownloadDirectoryCommand : BaseCommand + internal partial class DownloadDirectoryCommand : BaseCommand { private readonly IAmazonS3 _s3Client; private readonly TransferUtilityDownloadDirectoryRequest _request; diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs index dca8f3f076ac..9c6374502885 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs @@ -37,7 +37,7 @@ namespace Amazon.S3.Transfer.Internal /// /// The command to manage an upload using the S3 multipart API. 
/// - internal partial class MultipartUploadCommand : BaseCommand + internal partial class MultipartUploadCommand : BaseCommand { IAmazonS3 _s3Client; long _partSize; diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/OpenStreamCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/OpenStreamCommand.cs index 57eab52d3f98..0fdfc64bcbae 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/OpenStreamCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/OpenStreamCommand.cs @@ -29,7 +29,7 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class OpenStreamCommand : BaseCommand + internal partial class OpenStreamCommand : BaseCommand { IAmazonS3 _s3Client; TransferUtilityOpenStreamRequest _request; @@ -59,10 +59,5 @@ internal Stream ResponseStream { get { return this._responseStream; } } - - public override object Return - { - get { return this.ResponseStream; } - } } } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs index 302ffd5e2fce..2ba493556c35 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/ResponseMapper.cs @@ -20,6 +20,7 @@ * */ +using System; using System.Collections.Generic; using Amazon.S3.Model; @@ -37,10 +38,11 @@ internal static class ResponseMapper /// /// The PutObjectResponse to map from /// A new TransferUtilityUploadResponse with mapped fields + /// Thrown when source is null internal static TransferUtilityUploadResponse MapPutObjectResponse(PutObjectResponse source) { if (source == null) - return null; + throw new ArgumentNullException(nameof(source)); var response = new TransferUtilityUploadResponse(); @@ -63,11 +65,6 @@ internal static TransferUtilityUploadResponse MapPutObjectResponse(PutObjectResp response.VersionId = source.VersionId; response.Size = source.Size; - // Copy response metadata - response.ResponseMetadata = source.ResponseMetadata; 
- response.ContentLength = source.ContentLength; - response.HttpStatusCode = source.HttpStatusCode; - return response; } @@ -77,10 +74,11 @@ internal static TransferUtilityUploadResponse MapPutObjectResponse(PutObjectResp /// /// The CompleteMultipartUploadResponse to map from /// A new TransferUtilityUploadResponse with mapped fields + /// Thrown when source is null internal static TransferUtilityUploadResponse MapCompleteMultipartUploadResponse(CompleteMultipartUploadResponse source) { if (source == null) - return null; + throw new ArgumentNullException(nameof(source)); var response = new TransferUtilityUploadResponse(); @@ -102,69 +100,95 @@ internal static TransferUtilityUploadResponse MapCompleteMultipartUploadResponse response.Key = source.Key; response.Location = source.Location; - // Copy response metadata - response.ResponseMetadata = source.ResponseMetadata; - response.ContentLength = source.ContentLength; - response.HttpStatusCode = source.HttpStatusCode; - return response; } + /// + /// Private helper method to populate the common properties from GetObjectResponse to the base response class. + /// Contains all the shared mapping logic for GetObjectResponse fields. 
+ /// + /// The GetObjectResponse to map from + /// The TransferUtilityGetObjectResponseBase to populate + /// Thrown when source or target is null + private static void PopulateGetObjectResponseBase(GetObjectResponse source, TransferUtilityGetObjectResponseBase target) + { + if (source == null) + throw new ArgumentNullException(nameof(source)); + if (target == null) + throw new ArgumentNullException(nameof(target)); + + // Map all fields as defined in mapping.json "Conversion" -> "GetObjectResponse" -> "DownloadResponse" + target.AcceptRanges = source.AcceptRanges; + target.BucketKeyEnabled = source.BucketKeyEnabled.GetValueOrDefault(); + target.ChecksumCRC32 = source.ChecksumCRC32; + target.ChecksumCRC32C = source.ChecksumCRC32C; + target.ChecksumCRC64NVME = source.ChecksumCRC64NVME; + target.ChecksumSHA1 = source.ChecksumSHA1; + target.ChecksumSHA256 = source.ChecksumSHA256; + target.ChecksumType = source.ChecksumType; + target.ContentRange = source.ContentRange; + target.Headers = source.Headers; + target.DeleteMarker = source.DeleteMarker; + target.ETag = source.ETag; + target.Expiration = source.Expiration; + target.ExpiresString = source.ExpiresString; + target.LastModified = source.LastModified; + target.Metadata = source.Metadata; + target.MissingMeta = source.MissingMeta; + target.ObjectLockLegalHoldStatus = source.ObjectLockLegalHoldStatus; + target.ObjectLockMode = source.ObjectLockMode; + target.ObjectLockRetainUntilDate = source.ObjectLockRetainUntilDate; + target.PartsCount = source.PartsCount; + target.ReplicationStatus = source.ReplicationStatus; + target.RequestCharged = source.RequestCharged; + target.RestoreExpiration = source.RestoreExpiration; + target.RestoreInProgress = source.RestoreInProgress; + target.ServerSideEncryptionCustomerMethod = source.ServerSideEncryptionCustomerMethod; + target.ServerSideEncryptionCustomerProvidedKeyMD5 = source.ServerSideEncryptionCustomerProvidedKeyMD5; + target.ServerSideEncryptionKeyManagementServiceKeyId = 
source.ServerSideEncryptionKeyManagementServiceKeyId; + target.ServerSideEncryptionMethod = source.ServerSideEncryptionMethod; + target.StorageClass = source.StorageClass; + target.TagCount = source.TagCount; + target.VersionId = source.VersionId; + target.WebsiteRedirectLocation = source.WebsiteRedirectLocation; + } + /// /// Maps a GetObjectResponse to TransferUtilityDownloadResponse. /// Uses the field mappings defined in mapping.json "Conversion" -> "GetObjectResponse" -> "DownloadResponse". /// /// The GetObjectResponse to map from /// A new TransferUtilityDownloadResponse with mapped fields + /// Thrown when source is null internal static TransferUtilityDownloadResponse MapGetObjectResponse(GetObjectResponse source) { if (source == null) - return null; + throw new ArgumentNullException(nameof(source)); var response = new TransferUtilityDownloadResponse(); + PopulateGetObjectResponseBase(source, response); + return response; + } - // Map all fields as defined in mapping.json "Conversion" -> "GetObjectResponse" -> "DownloadResponse" - response.AcceptRanges = source.AcceptRanges; - response.BucketKeyEnabled = source.BucketKeyEnabled.GetValueOrDefault(); - response.ChecksumCRC32 = source.ChecksumCRC32; - response.ChecksumCRC32C = source.ChecksumCRC32C; - response.ChecksumCRC64NVME = source.ChecksumCRC64NVME; - response.ChecksumSHA1 = source.ChecksumSHA1; - response.ChecksumSHA256 = source.ChecksumSHA256; - response.ChecksumType = source.ChecksumType; - response.ContentRange = source.ContentRange; - response.Headers = source.Headers; - response.DeleteMarker = source.DeleteMarker; - response.ETag = source.ETag; - response.Expiration = source.Expiration; - response.ExpiresString = source.ExpiresString; - response.LastModified = source.LastModified; - response.Metadata = source.Metadata; - response.MissingMeta = source.MissingMeta; - response.ObjectLockLegalHoldStatus = source.ObjectLockLegalHoldStatus; - response.ObjectLockMode = source.ObjectLockMode; - 
response.ObjectLockRetainUntilDate = source.ObjectLockRetainUntilDate; - response.PartsCount = source.PartsCount; - response.ReplicationStatus = source.ReplicationStatus; - response.RequestCharged = source.RequestCharged; - response.RestoreExpiration = source.RestoreExpiration; - response.RestoreInProgress = source.RestoreInProgress; - response.ServerSideEncryptionCustomerMethod = source.ServerSideEncryptionCustomerMethod; - response.ServerSideEncryptionCustomerProvidedKeyMD5 = source.ServerSideEncryptionCustomerProvidedKeyMD5; - response.ServerSideEncryptionKeyManagementServiceKeyId = source.ServerSideEncryptionKeyManagementServiceKeyId; - response.ServerSideEncryptionMethod = source.ServerSideEncryptionMethod; - response.StorageClass = source.StorageClass; - response.TagCount = source.TagCount; - response.VersionId = source.VersionId; - response.WebsiteRedirectLocation = source.WebsiteRedirectLocation; + /// + /// Maps a GetObjectResponse to TransferUtilityOpenStreamResponse. + /// Uses the same field mappings as DownloadResponse plus the ResponseStream property. 
+ /// + /// The GetObjectResponse to map from + /// A new TransferUtilityOpenStreamResponse with mapped fields + /// Thrown when source is null + internal static TransferUtilityOpenStreamResponse MapGetObjectResponseToOpenStream(GetObjectResponse source) + { + if (source == null) + throw new ArgumentNullException(nameof(source)); - // Copy response metadata - response.ResponseMetadata = source.ResponseMetadata; - response.ContentLength = source.ContentLength; - response.HttpStatusCode = source.HttpStatusCode; + var response = new TransferUtilityOpenStreamResponse(); + PopulateGetObjectResponseBase(source, response); + response.ResponseStream = source.ResponseStream; - return response; + return response; } + } } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/SimpleUploadCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/SimpleUploadCommand.cs index d8de23a6145b..799b36fcde28 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/SimpleUploadCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/SimpleUploadCommand.cs @@ -36,7 +36,7 @@ namespace Amazon.S3.Transfer.Internal /// /// This command is for doing regular PutObject requests. /// - internal partial class SimpleUploadCommand : BaseCommand + internal partial class SimpleUploadCommand : BaseCommand { IAmazonS3 _s3Client; TransferUtilityConfig _config; diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs index e4be9b27aa74..693a9ef8325a 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs @@ -32,7 +32,7 @@ namespace Amazon.S3.Transfer.Internal /// This command files all the files that meets the criteria specified in the TransferUtilityUploadDirectoryRequest request /// and uploads them. 
/// - internal partial class UploadDirectoryCommand : BaseCommand + internal partial class UploadDirectoryCommand : BaseCommand { TransferUtilityUploadDirectoryRequest _request; TransferUtility _utility; diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/AbortMultipartUploadsCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/AbortMultipartUploadsCommand.async.cs index ed3dd81903ea..e559239ba6cd 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/AbortMultipartUploadsCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/AbortMultipartUploadsCommand.async.cs @@ -24,7 +24,7 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class AbortMultipartUploadsCommand : BaseCommand + internal partial class AbortMultipartUploadsCommand : BaseCommand { TransferUtilityConfig _config; @@ -36,7 +36,7 @@ internal AbortMultipartUploadsCommand(IAmazonS3 s3Client, string bucketName, Dat this._config = config; } - public override async Task ExecuteAsync(CancellationToken cancellationToken) + public override async Task ExecuteAsync(CancellationToken cancellationToken) { if (string.IsNullOrEmpty(this._bucketName)) { @@ -88,6 +88,8 @@ await asyncThrottler.WaitAsync(cancellationToken) await WhenAllOrFirstExceptionAsync(pendingTasks,cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); + + return new TransferUtilityAbortMultipartUploadsResponse(); } finally { diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/BaseCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/BaseCommand.async.cs index f9591f6d1d68..65ee8d8cb4c8 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/BaseCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/BaseCommand.async.cs @@ -24,9 +24,12 @@ namespace Amazon.S3.Transfer.Internal { - internal abstract partial class BaseCommand + internal abstract partial class BaseCommand where TResponse : class { - public 
abstract Task ExecuteAsync(CancellationToken cancellationToken); + /// + /// Executes the command and returns a typed response + /// + public abstract Task ExecuteAsync(CancellationToken cancellationToken); /// /// Waits for all of the tasks to complete or till any task fails or is canceled. @@ -80,7 +83,7 @@ await completedTask } } - protected static async Task ExecuteCommandAsync(BaseCommand command, CancellationTokenSource internalCts, SemaphoreSlim throttler) + protected static async Task ExecuteCommandAsync(BaseCommand command, CancellationTokenSource internalCts, SemaphoreSlim throttler) where T : class { try { diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/DownloadCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/DownloadCommand.async.cs index 3e536a4bb607..6baef9262774 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/DownloadCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/DownloadCommand.async.cs @@ -28,9 +28,9 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class DownloadCommand : BaseCommand + internal partial class DownloadCommand : BaseCommand { - public override async Task ExecuteAsync(CancellationToken cancellationToken) + public override async Task ExecuteAsync(CancellationToken cancellationToken) { ValidateRequest(); GetObjectRequest getRequest = ConvertToGetObjectRequest(this._request); @@ -130,6 +130,9 @@ await response.WriteResponseStreamToFileAsync(this._request.FilePath, true, canc } WaitBeforeRetry(retries); } while (shouldRetry); + + // TODO map and return response + return new TransferUtilityDownloadResponse(); } private static bool HandleExceptionForHttpClient(Exception exception, int retries, int maxRetries) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs index dd747e3d83e8..3f29336f0fe0 100644 --- 
a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs @@ -27,20 +27,20 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class MultipartUploadCommand : BaseCommand + internal partial class MultipartUploadCommand : BaseCommand { public SemaphoreSlim AsyncThrottler { get; set; } Dictionary _expectedUploadParts = new Dictionary(); - public override async Task ExecuteAsync(CancellationToken cancellationToken) + public override async Task ExecuteAsync(CancellationToken cancellationToken) { // Fire transfer initiated event FIRST, before choosing path FireTransferInitiatedEvent(); if ( (this._fileTransporterRequest.InputStream != null && !this._fileTransporterRequest.InputStream.CanSeek) || this._fileTransporterRequest.ContentLength == -1) { - await UploadUnseekableStreamAsync(this._fileTransporterRequest, cancellationToken).ConfigureAwait(false); + return await UploadUnseekableStreamAsync(this._fileTransporterRequest, cancellationToken).ConfigureAwait(false); } else { @@ -144,6 +144,7 @@ await localThrottler.WaitAsync(cancellationToken) var mappedResponse = ResponseMapper.MapCompleteMultipartUploadResponse(completeResponse); FireTransferCompletedEvent(mappedResponse); + return mappedResponse; } catch (Exception e) { @@ -275,7 +276,7 @@ private void AbortMultipartUpload(string uploadId) Logger.InfoFormat("Error attempting to abort multipart for key {0}: {1}", this._fileTransporterRequest.Key, e.Message); } } - private async Task UploadUnseekableStreamAsync(TransferUtilityUploadRequest request, CancellationToken cancellationToken = default(CancellationToken)) + private async Task UploadUnseekableStreamAsync(TransferUtilityUploadRequest request, CancellationToken cancellationToken = default(CancellationToken)) { cancellationToken.ThrowIfCancellationRequested(); @@ -376,6 +377,7 @@ private void AbortMultipartUpload(string uploadId) var 
mappedResponse = ResponseMapper.MapCompleteMultipartUploadResponse(completeResponse); FireTransferCompletedEvent(mappedResponse); + return mappedResponse; } } catch (Exception ex) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamCommand.async.cs index 192560f837ee..8c954d256fab 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamCommand.async.cs @@ -24,14 +24,16 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class OpenStreamCommand : BaseCommand + internal partial class OpenStreamCommand : BaseCommand { - public override async Task ExecuteAsync(CancellationToken cancellationToken) + public override async Task ExecuteAsync(CancellationToken cancellationToken) { var getRequest = ConstructRequest(); var response = await _s3Client.GetObjectAsync(getRequest, cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); _responseStream = response.ResponseStream; + // TODO map and return response + return new TransferUtilityOpenStreamResponse(); } } } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/SimpleUploadCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/SimpleUploadCommand.async.cs index 51680eaaba09..1d936f0bdf5c 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/SimpleUploadCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/SimpleUploadCommand.async.cs @@ -24,11 +24,11 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class SimpleUploadCommand : BaseCommand + internal partial class SimpleUploadCommand : BaseCommand { public SemaphoreSlim AsyncThrottler { get; set; } - public override async Task ExecuteAsync(CancellationToken cancellationToken) + public override async Task ExecuteAsync(CancellationToken cancellationToken) { try { @@ -47,6 +47,8 
@@ await this.AsyncThrottler.WaitAsync(cancellationToken) var mappedResponse = ResponseMapper.MapPutObjectResponse(response); FireTransferCompletedEvent(mappedResponse); + + return mappedResponse; } catch (Exception) { diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs index a160bc1504f4..be3fb4f0ae33 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs @@ -25,7 +25,7 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class DownloadDirectoryCommand : BaseCommand + internal partial class DownloadDirectoryCommand : BaseCommand { TransferUtilityConfig _config; @@ -38,7 +38,7 @@ internal DownloadDirectoryCommand(IAmazonS3 s3Client, TransferUtilityDownloadDir this._config = config; } - public override async Task ExecuteAsync(CancellationToken cancellationToken) + public override async Task ExecuteAsync(CancellationToken cancellationToken) { ValidateRequest(); EnsureDirectoryExists(new DirectoryInfo(this._request.LocalDirectory)); @@ -112,6 +112,8 @@ await asyncThrottler.WaitAsync(cancellationToken) } await WhenAllOrFirstExceptionAsync(pendingTasks, cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); + + return new TransferUtilityDownloadDirectoryResponse(); } finally { diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs index 75e1744d5435..10f09be9ed07 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs @@ -23,11 +23,11 @@ namespace Amazon.S3.Transfer.Internal { - internal partial 
class UploadDirectoryCommand : BaseCommand + internal partial class UploadDirectoryCommand : BaseCommand { public bool UploadFilesConcurrently { get; set; } - public override async Task ExecuteAsync(CancellationToken cancellationToken) + public override async Task ExecuteAsync(CancellationToken cancellationToken) { string prefix = GetKeyPrefix(); @@ -87,6 +87,8 @@ await WhenAllOrFirstExceptionAsync(pendingTasks, cancellationToken) if (asyncThrottler != null) asyncThrottler.Dispose(); } + + return new TransferUtilityUploadDirectoryResponse(); } private Task GetFiles(string path, string searchPattern, SearchOption searchOption, CancellationToken cancellationToken) diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtility.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtility.cs index 472a5933ba28..f4dde2c232e2 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtility.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtility.cs @@ -386,7 +386,7 @@ private static TransferUtilityUploadRequest ConstructUploadRequest(Stream stream }; } - internal BaseCommand GetUploadCommand(TransferUtilityUploadRequest request) + internal BaseCommand GetUploadCommand(TransferUtilityUploadRequest request) { validate(request); diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityAbortMultipartUploadsResponse.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityAbortMultipartUploadsResponse.cs new file mode 100644 index 000000000000..6c63c4b0a75b --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityAbortMultipartUploadsResponse.cs @@ -0,0 +1,35 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. 
A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +using Amazon.Runtime; + +namespace Amazon.S3.Transfer +{ + /// + /// Response object for Transfer Utility abort multipart uploads operations. + /// Contains response metadata from abort multipart uploads operations. + /// + public class TransferUtilityAbortMultipartUploadsResponse + { + // Empty placeholder class - properties will be added in future iterations + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryResponse.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryResponse.cs new file mode 100644 index 000000000000..6df0c1c5a619 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryResponse.cs @@ -0,0 +1,26 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +using Amazon.Runtime; + +namespace Amazon.S3.Transfer +{ + /// + /// Contains the details returned from a Transfer Utility download directory operation. + /// + public class TransferUtilityDownloadDirectoryResponse + { + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadResponse.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadResponse.cs index d10c72f47c0f..36474a64c0aa 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadResponse.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadResponse.cs @@ -31,270 +31,7 @@ namespace Amazon.S3.Transfer /// Response object for Transfer Utility download operations. /// Contains response metadata from download operations. /// - public class TransferUtilityDownloadResponse : AmazonWebServiceResponse + public class TransferUtilityDownloadResponse : TransferUtilityGetObjectResponseBase { - /// - /// Gets and sets the AcceptRanges property. - /// - public string AcceptRanges { get; set; } - - /// - /// Gets and sets the property BucketKeyEnabled. - /// - /// Indicates whether the object uses an S3 Bucket Key for server-side encryption with - /// Amazon Web Services KMS (SSE-KMS). - /// - /// - public bool? BucketKeyEnabled { get; set; } - - /// - /// The collection of headers for the response. - /// - public HeadersCollection Headers { get; set; } - - /// - /// Gets and sets the property ChecksumCRC32. - /// - /// The Base64 encoded, 32-bit CRC-32 checksum of the object. - /// - /// - public string ChecksumCRC32 { get; set; } - - /// - /// Gets and sets the property ChecksumCRC32C. - /// - /// The Base64 encoded, 32-bit CRC-32C checksum of the object. - /// - /// - public string ChecksumCRC32C { get; set; } - - /// - /// Gets and sets the property ChecksumCRC64NVME. - /// - /// The Base64 encoded, 64-bit CRC-64NVME checksum of the object. 
- /// - /// - public string ChecksumCRC64NVME { get; set; } - - /// - /// Gets and sets the property ChecksumSHA1. - /// - /// The Base64 encoded, 160-bit SHA-1 digest of the object. - /// - /// - public string ChecksumSHA1 { get; set; } - - /// - /// Gets and sets the property ChecksumSHA256. - /// - /// The Base64 encoded, 256-bit SHA-256 checksum of the object. - /// - /// - public string ChecksumSHA256 { get; set; } - - /// - /// Gets and sets the property ChecksumType. - /// - /// The checksum type used to calculate the object-level checksum. - /// - /// - public ChecksumType ChecksumType { get; set; } - - /// - /// Gets and sets the ContentRange property. - /// - public string ContentRange { get; set; } - - /// - /// Gets and sets the DeleteMarker property. - /// - /// Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. - /// - /// - public string DeleteMarker { get; set; } - - /// - /// Gets and sets the ETag property. - /// - /// An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL. - /// - /// - public string ETag { get; set; } - - /// - /// Gets and sets the property Expiration. - /// - /// If the object expiration is configured, this will contain the expiration date and rule ID. - /// - /// - public Expiration Expiration { get; set; } - - /// - /// Gets and sets the ExpiresString property. - /// - /// The date and time at which the object is no longer cacheable (string format). - /// - /// - public string ExpiresString { get; set; } - - /// - /// Gets and sets the property LastModified. - /// - /// Date and time when the object was last modified. - /// - /// - public DateTime? LastModified { get; set; } - - /// - /// Gets and sets the Metadata property. - /// - /// The collection of metadata for the object. - /// - /// - public MetadataCollection Metadata { get; set; } - - /// - /// Gets and sets the property MissingMeta. 
- /// - /// This is set to the number of metadata entries not returned in the headers that are - /// prefixed with x-amz-meta-. - /// - /// - public int? MissingMeta { get; set; } - - /// - /// Gets and sets the property ObjectLockLegalHoldStatus. - /// - /// Indicates whether this object has an active legal hold. - /// - /// - public ObjectLockLegalHoldStatus ObjectLockLegalHoldStatus { get; set; } - - /// - /// Gets and sets the property ObjectLockMode. - /// - /// The Object Lock mode that's currently in place for this object. - /// - /// - public ObjectLockMode ObjectLockMode { get; set; } - - /// - /// Gets and sets the property ObjectLockRetainUntilDate. - /// - /// The date and time when this object's Object Lock will expire. - /// - /// - public DateTime? ObjectLockRetainUntilDate { get; set; } - - /// - /// Gets and sets the PartsCount property. - /// - /// The number of parts this object has. - /// - /// - public int? PartsCount { get; set; } - - /// - /// Gets and sets the property ReplicationStatus. - /// - /// Amazon S3 can return this if your request involves a bucket that is either a source - /// or destination in a replication rule. - /// - /// - public ReplicationStatus ReplicationStatus { get; set; } - - /// - /// Gets and sets the RequestCharged property. - /// - /// If present, indicates that the requester was successfully charged for the request. - /// - /// - public RequestCharged RequestCharged { get; set; } - - /// - /// Gets and sets the RestoreExpiration property. - /// - /// RestoreExpiration will be set for objects that have been restored from Amazon Glacier. - /// It indicates for those objects how long the restored object will exist. - /// - /// - public DateTime? RestoreExpiration { get; set; } - - /// - /// Gets and sets the RestoreInProgress - /// - /// Will be true when the object is in the process of being restored from Amazon Glacier. - /// - /// - /// This functionality is not supported for directory buckets. 
- /// Only the S3 Express One Zone storage class is supported by directory buckets to store objects. - /// - /// - public bool? RestoreInProgress { get; set; } - - /// - /// Gets and sets the ServerSideEncryptionCustomerMethod property. - /// - /// The server-side encryption algorithm to be used with the customer provided key. - /// - /// - public ServerSideEncryptionCustomerMethod ServerSideEncryptionCustomerMethod { get; set; } - - /// - /// Gets and sets the ServerSideEncryptionCustomerProvidedKeyMD5 property. - /// - /// The MD5 server-side encryption of the customer-provided encryption key. - /// - /// - public string ServerSideEncryptionCustomerProvidedKeyMD5 { get; set; } - - /// - /// Gets and sets the ServerSideEncryptionKeyManagementServiceKeyId property. - /// - /// If present, indicates the ID of the KMS key that was used for object encryption. - /// - /// - public string ServerSideEncryptionKeyManagementServiceKeyId { get; set; } - - /// - /// Gets and sets the ServerSideEncryptionMethod property. - /// - /// The server-side encryption algorithm used when you store this object in Amazon S3. - /// - /// - public ServerSideEncryptionMethod ServerSideEncryptionMethod { get; set; } - - /// - /// Gets and sets the property StorageClass. - /// - /// Provides storage class information of the object. - /// - /// - public S3StorageClass StorageClass { get; set; } - - /// - /// Gets and sets the property TagCount. - /// - /// The number of tags, if any, on the object. - /// - /// - public int TagCount { get; set; } - - /// - /// Gets and sets the property VersionId. - /// - /// Version ID of the object. - /// - /// - public string VersionId { get; set; } - - /// - /// Gets and sets the property WebsiteRedirectLocation. - /// - /// If the bucket is configured as a website, redirects requests for this object to another - /// object in the same bucket or to an external URL. 
- /// - /// - public string WebsiteRedirectLocation { get; set; } } } diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityGetObjectResponseBase.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityGetObjectResponseBase.cs new file mode 100644 index 000000000000..431d498afe9e --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityGetObjectResponseBase.cs @@ -0,0 +1,293 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +using System; +using System.Collections.Generic; +using Amazon.Runtime; +using Amazon.S3.Model; + +namespace Amazon.S3.Transfer +{ + /// + /// Base response object for Transfer Utility operations that retrieve S3 object metadata. + /// Contains response metadata from S3 GetObject operations. + /// + public abstract class TransferUtilityGetObjectResponseBase + { + /// + /// Gets and sets the AcceptRanges property. + /// + public string AcceptRanges { get; set; } + + /// + /// Gets and sets the property BucketKeyEnabled. + /// + /// Indicates whether the object uses an S3 Bucket Key for server-side encryption with + /// Amazon Web Services KMS (SSE-KMS). + /// + /// + public bool? 
BucketKeyEnabled { get; set; } + + /// + /// The collection of headers for the response. + /// + public HeadersCollection Headers { get; set; } + + /// + /// Gets and sets the property ChecksumCRC32. + /// + /// The Base64 encoded, 32-bit CRC-32 checksum of the object. + /// + /// + public string ChecksumCRC32 { get; set; } + + /// + /// Gets and sets the property ChecksumCRC32C. + /// + /// The Base64 encoded, 32-bit CRC-32C checksum of the object. + /// + /// + public string ChecksumCRC32C { get; set; } + + /// + /// Gets and sets the property ChecksumCRC64NVME. + /// + /// The Base64 encoded, 64-bit CRC-64NVME checksum of the object. + /// + /// + public string ChecksumCRC64NVME { get; set; } + + /// + /// Gets and sets the property ChecksumSHA1. + /// + /// The Base64 encoded, 160-bit SHA-1 digest of the object. + /// + /// + public string ChecksumSHA1 { get; set; } + + /// + /// Gets and sets the property ChecksumSHA256. + /// + /// The Base64 encoded, 256-bit SHA-256 checksum of the object. + /// + /// + public string ChecksumSHA256 { get; set; } + + /// + /// Gets and sets the property ChecksumType. + /// + /// The checksum type used to calculate the object-level checksum. + /// + /// + public ChecksumType ChecksumType { get; set; } + + /// + /// Gets and sets the ContentRange property. + /// + public string ContentRange { get; set; } + + /// + /// Gets and sets the DeleteMarker property. + /// + /// Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. + /// + /// + public string DeleteMarker { get; set; } + + /// + /// Gets and sets the ETag property. + /// + /// An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL. + /// + /// + public string ETag { get; set; } + + /// + /// Gets and sets the property Expiration. + /// + /// If the object expiration is configured, this will contain the expiration date and rule ID. 
+ /// + /// + public Expiration Expiration { get; set; } + + /// + /// Gets and sets the ExpiresString property. + /// + /// The date and time at which the object is no longer cacheable (string format). + /// + /// + public string ExpiresString { get; set; } + + /// + /// Gets and sets the property LastModified. + /// + /// Date and time when the object was last modified. + /// + /// + public DateTime? LastModified { get; set; } + + /// + /// Gets and sets the Metadata property. + /// + /// The collection of metadata for the object. + /// + /// + public MetadataCollection Metadata { get; set; } + + /// + /// Gets and sets the property MissingMeta. + /// + /// This is set to the number of metadata entries not returned in the headers that are + /// prefixed with x-amz-meta-. + /// + /// + public int? MissingMeta { get; set; } + + /// + /// Gets and sets the property ObjectLockLegalHoldStatus. + /// + /// Indicates whether this object has an active legal hold. + /// + /// + public ObjectLockLegalHoldStatus ObjectLockLegalHoldStatus { get; set; } + + /// + /// Gets and sets the property ObjectLockMode. + /// + /// The Object Lock mode that's currently in place for this object. + /// + /// + public ObjectLockMode ObjectLockMode { get; set; } + + /// + /// Gets and sets the property ObjectLockRetainUntilDate. + /// + /// The date and time when this object's Object Lock will expire. + /// + /// + public DateTime? ObjectLockRetainUntilDate { get; set; } + + /// + /// Gets and sets the PartsCount property. + /// + /// The number of parts this object has. + /// + /// + public int? PartsCount { get; set; } + + /// + /// Gets and sets the property ReplicationStatus. + /// + /// Amazon S3 can return this if your request involves a bucket that is either a source + /// or destination in a replication rule. + /// + /// + public ReplicationStatus ReplicationStatus { get; set; } + + /// + /// Gets and sets the RequestCharged property. 
+ /// + /// If present, indicates that the requester was successfully charged for the request. + /// + /// + public RequestCharged RequestCharged { get; set; } + + /// + /// Gets and sets the RestoreExpiration property. + /// + /// RestoreExpiration will be set for objects that have been restored from Amazon Glacier. + /// It indicates for those objects how long the restored object will exist. + /// + /// + public DateTime? RestoreExpiration { get; set; } + + /// + /// + /// + public bool? RestoreInProgress { get; set; } + + /// + /// Gets and sets the ServerSideEncryptionCustomerMethod property. + /// + /// The server-side encryption algorithm to be used with the customer provided key. + /// + /// + public ServerSideEncryptionCustomerMethod ServerSideEncryptionCustomerMethod { get; set; } + + /// + /// Gets and sets the ServerSideEncryptionCustomerProvidedKeyMD5 property. + /// + /// The MD5 server-side encryption of the customer-provided encryption key. + /// + /// + public string ServerSideEncryptionCustomerProvidedKeyMD5 { get; set; } + + /// + /// Gets and sets the ServerSideEncryptionKeyManagementServiceKeyId property. + /// + /// If present, indicates the ID of the KMS key that was used for object encryption. + /// + /// + public string ServerSideEncryptionKeyManagementServiceKeyId { get; set; } + + /// + /// Gets and sets the ServerSideEncryptionMethod property. + /// + /// The server-side encryption algorithm used when you store this object in Amazon S3. + /// + /// + public ServerSideEncryptionMethod ServerSideEncryptionMethod { get; set; } + + /// + /// Gets and sets the property StorageClass. + /// + /// Provides storage class information of the object. + /// + /// + public S3StorageClass StorageClass { get; set; } + + /// + /// Gets and sets the property TagCount. + /// + /// The number of tags, if any, on the object. + /// + /// + public int? TagCount { get; set; } + + /// + /// Gets and sets the property VersionId. 
+ /// + /// Version ID of the object. + /// + /// + public string VersionId { get; set; } + + /// + /// Gets and sets the property WebsiteRedirectLocation. + /// + /// If the bucket is configured as a website, redirects requests for this object to another + /// object in the same bucket or to an external URL. + /// + /// + public string WebsiteRedirectLocation { get; set; } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityOpenStreamResponse.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityOpenStreamResponse.cs new file mode 100644 index 000000000000..df2f57bce35f --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityOpenStreamResponse.cs @@ -0,0 +1,97 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +using System; +using System.IO; +using Amazon.Runtime; + +namespace Amazon.S3.Transfer +{ + /// + /// Response object for Transfer Utility open stream operations. + /// Contains the stream and response metadata from open stream operations. 
+ /// + public class TransferUtilityOpenStreamResponse : TransferUtilityGetObjectResponseBase, IDisposable + { + private bool disposed; + private Stream responseStream; + + #region Dispose Pattern + + /// + /// Disposes of all managed and unmanaged resources. + /// + public void Dispose() + { + Dispose(true); + GC.SuppressFinalize(this); + } + + /// + /// Releases the unmanaged resources used by the TransferUtilityOpenStreamResponse and optionally disposes of the managed resources. + /// + /// true to release both managed and unmanaged resources; false to releases only unmanaged resources. + protected virtual void Dispose(bool disposing) + { + if (!this.disposed) + { + if (disposing) + { + // Remove Managed Resources + // I.O.W. remove resources that have to be explicitly + // "Dispose"d or Closed. For an S3 Response, these are: + // 1. The Response Stream for GET Object requests + // 2. The HttpResponse object for GET Object requests + if (responseStream != null) + { + responseStream.Dispose(); + } + } + + responseStream = null; + disposed = true; + } + } + + #endregion + + /// + /// Gets and sets the ResponseStream property. + /// + /// An open stream read from to get the data from S3. In order to + /// use this stream without leaking the underlying resource, please + /// wrap access to the stream within a using block. 
+ /// + /// + public Stream ResponseStream + { + get { return this.responseStream; } + set { this.responseStream = value; } + } + + // Check to see if ResponseStream property is set + internal bool IsSetResponseStream() + { + return this.responseStream != null; + } + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryResponse.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryResponse.cs new file mode 100644 index 000000000000..94f32558d1fb --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryResponse.cs @@ -0,0 +1,35 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +using Amazon.Runtime; + +namespace Amazon.S3.Transfer +{ + /// + /// Response object for Transfer Utility upload directory operations. + /// Contains response metadata from upload directory operations. 
+ /// + public class TransferUtilityUploadDirectoryResponse + { + // Empty placeholder class - properties will be added in future iterations + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadResponse.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadResponse.cs index fc3218f0411e..e7361bfd629f 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadResponse.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadResponse.cs @@ -32,7 +32,7 @@ namespace Amazon.S3.Transfer /// Contains unified response fields from both simple uploads (PutObjectResponse) /// and multipart uploads (CompleteMultipartUploadResponse). /// - public class TransferUtilityUploadResponse : AmazonWebServiceResponse + public class TransferUtilityUploadResponse { private bool? _bucketKeyEnabled; private string _bucketName; diff --git a/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs b/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs index 6622b2622e38..6ded5ccc05b1 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs @@ -88,6 +88,7 @@ public partial class TransferUtility : ITransferUtility /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. /// /// The task object representing the asynchronous operation. + [Obsolete("Use UploadWithResponseAsync instead which allows you to upload files and also returns response metadata.", error: false)] public async Task UploadAsync(string filePath, string bucketName, CancellationToken cancellationToken = default(CancellationToken)) { var request = ConstructUploadRequest(filePath, bucketName); @@ -129,6 +130,7 @@ public partial class TransferUtility : ITransferUtility /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. 
/// /// The task object representing the asynchronous operation. + [Obsolete("Use UploadWithResponseAsync instead which allows you to upload files and also returns response metadata.", error: false)] public async Task UploadAsync(string filePath, string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)) { var request = ConstructUploadRequest(filePath, bucketName,key); @@ -169,6 +171,7 @@ public partial class TransferUtility : ITransferUtility /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. /// /// The task object representing the asynchronous operation. + [Obsolete("Use UploadWithResponseAsync instead which allows you to upload files and also returns response metadata.", error: false)] public async Task UploadAsync(Stream stream, string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)) { var request = ConstructUploadRequest(stream, bucketName, key); @@ -208,6 +211,7 @@ public partial class TransferUtility : ITransferUtility /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. /// /// The task object representing the asynchronous operation. + [Obsolete("Use UploadWithResponseAsync instead which allows you to upload files and also returns response metadata.", error: false)] public async Task UploadAsync(TransferUtilityUploadRequest request, CancellationToken cancellationToken = default(CancellationToken)) { using(CreateSpan(nameof(UploadAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT)) @@ -217,6 +221,167 @@ public partial class TransferUtility : ITransferUtility await command.ExecuteAsync(cancellationToken).ConfigureAwait(false); } } + + /// + /// Uploads the specified file and returns response metadata. + /// The object key is derived from the file's name. + /// Multiple threads are used to read the file and perform multiple uploads in parallel. 
+ /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. + /// + /// + /// For nonseekable streams or streams with an unknown length, TransferUtility will use multipart upload and buffer up to a part size in memory + /// until the final part is reached and complete the upload. The buffer for the multipart upload is controlled by S3Constants.MinPartSize + /// and the default value is 5 megabytes. You can also adjust the read buffer size(i.e.how many bytes to read before writing to the part buffer) + /// via the BufferSize property on the ClientConfig.The default value for this is 8192 bytes. + /// + /// + /// + /// The file path of the file to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with upload response metadata. 
+ public async Task UploadWithResponseAsync(string filePath, string bucketName, CancellationToken cancellationToken = default(CancellationToken)) + { + var request = ConstructUploadRequest(filePath, bucketName); + return await UploadWithResponseAsync(request, cancellationToken).ConfigureAwait(false); + } + + /// + /// Uploads the specified file and returns response metadata. + /// Multiple threads are used to read the file and perform multiple uploads in parallel. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. + /// + /// + /// For nonseekable streams or streams with an unknown length, TransferUtility will use multipart upload and buffer up to a part size in memory + /// until the final part is reached and complete the upload. The buffer for the multipart upload is controlled by S3Constants.MinPartSize + /// and the default value is 5 megabytes. You can also adjust the read buffer size(i.e.how many bytes to read before writing to the part buffer) + /// via the BufferSize property on the ClientConfig.The default value for this is 8192 bytes. + /// + /// + /// + /// The file path of the file to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. + /// + /// + /// The key under which the Amazon S3 object is stored. 
+ /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with upload response metadata. + public async Task UploadWithResponseAsync(string filePath, string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)) + { + var request = ConstructUploadRequest(filePath, bucketName, key); + return await UploadWithResponseAsync(request, cancellationToken).ConfigureAwait(false); + } + + /// + /// Uploads the contents of the specified stream and returns response metadata. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. + /// + /// + /// For nonseekable streams or streams with an unknown length, TransferUtility will use multipart upload and buffer up to a part size in memory + /// until the final part is reached and complete the upload. The buffer for the multipart upload is controlled by S3Constants.MinPartSize + /// and the default value is 5 megabytes. You can also adjust the read buffer size(i.e.how many bytes to read before writing to the part buffer) + /// via the BufferSize property on the ClientConfig.The default value for this is 8192 bytes. + /// + /// + /// + /// The stream to read to obtain the content to upload. 
+ /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the stream to. + /// + /// + /// The key under which the Amazon S3 object is stored. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with upload response metadata. + public async Task UploadWithResponseAsync(Stream stream, string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)) + { + var request = ConstructUploadRequest(stream, bucketName, key); + return await UploadWithResponseAsync(request, cancellationToken).ConfigureAwait(false); + } + + /// + /// Uploads the file or stream specified by the request and returns response metadata. + /// To track the progress of the upload, + /// add an event listener to the request's UploadProgressEvent. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. + /// + /// + /// For nonseekable streams or streams with an unknown length, TransferUtility will use multipart upload and buffer up to a part size in memory + /// until the final part is reached and complete the upload. 
The part size buffer for the multipart upload is controlled by the partSize + /// specified on the TransferUtilityUploadRequest, and if none is specified it defaults to S3Constants.MinPartSize (5 megabytes). + /// You can also adjust the read buffer size (i.e. how many bytes to read before adding it to the + /// part buffer) via the BufferSize property on the ClientConfig. The default value for this is 8192 bytes. + /// + /// + /// + /// Contains all the parameters required to upload to Amazon S3. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with upload response metadata. + public async Task UploadWithResponseAsync(TransferUtilityUploadRequest request, CancellationToken cancellationToken = default(CancellationToken)) + { + using(CreateSpan(nameof(UploadWithResponseAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT)) + { + CheckForBlockedArn(request.BucketName, "Upload"); + var command = GetUploadCommand(request, null); + return await command.ExecuteAsync(cancellationToken).ConfigureAwait(false); + } + } #endregion #region AbortMultipartUploads @@ -321,7 +486,7 @@ public partial class TransferUtility : ITransferUtility #endregion - internal BaseCommand GetUploadCommand(TransferUtilityUploadRequest request, SemaphoreSlim asyncThrottler) + internal BaseCommand GetUploadCommand(TransferUtilityUploadRequest request, SemaphoreSlim asyncThrottler) { validate(request); if (IsMultipartUpload(request)) diff --git a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs index 659ddc9c7cae..36423834f18d 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs @@ -167,6 +167,7 @@ public void 
UploadDirectory(TransferUtilityUploadDirectoryRequest request) /// /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. /// + [Obsolete("Use UploadWithResponse instead which allows you to upload files and also returns response metadata.", error: false)] public void Upload(string filePath, string bucketName) { try @@ -204,6 +205,7 @@ public void Upload(string filePath, string bucketName) /// /// The key under which the Amazon S3 object is stored. /// + [Obsolete("Use UploadWithResponse instead which allows you to upload files and also returns response metadata.", error: false)] public void Upload(string filePath, string bucketName, string key) { try @@ -241,6 +243,7 @@ public void Upload(string filePath, string bucketName, string key) /// /// The key under which the Amazon S3 object is stored. /// + [Obsolete("Use UploadWithResponse instead which allows you to upload files and also returns response metadata.", error: false)] public void Upload(Stream stream, string bucketName, string key) { try @@ -273,6 +276,7 @@ public void Upload(Stream stream, string bucketName, string key) /// /// Contains all the parameters required to upload to Amazon S3. /// + [Obsolete("Use UploadWithResponse instead which allows you to upload files and also returns response metadata.", error: false)] public void Upload(TransferUtilityUploadRequest request) { try @@ -285,6 +289,154 @@ public void Upload(TransferUtilityUploadRequest request) } } + /// + /// Uploads the specified file and returns response metadata. + /// The object key is derived from the file's name. + /// Multiple threads are used to read the file and perform multiple uploads in parallel. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. 
+ /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. + /// + /// + /// + /// The file path of the file to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. + /// + /// The upload response metadata. + public TransferUtilityUploadResponse UploadWithResponse(string filePath, string bucketName) + { + try + { + return UploadWithResponseAsync(filePath, bucketName).Result; + } + catch (AggregateException e) + { + ExceptionDispatchInfo.Capture(e.InnerException).Throw(); + return null; + } + } + + /// + /// Uploads the specified file and returns response metadata. + /// Multiple threads are used to read the file and perform multiple uploads in parallel. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. + /// + /// + /// + /// The file path of the file to upload. 
+ /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. + /// + /// + /// The key under which the Amazon S3 object is stored. + /// + /// The upload response metadata. + public TransferUtilityUploadResponse UploadWithResponse(string filePath, string bucketName, string key) + { + try + { + return UploadWithResponseAsync(filePath, bucketName, key).Result; + } + catch (AggregateException e) + { + ExceptionDispatchInfo.Capture(e.InnerException).Throw(); + return null; + } + } + + /// + /// Uploads the contents of the specified stream and returns response metadata. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. + /// + /// + /// + /// The stream to read to obtain the content to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the stream to. + /// + /// + /// The key under which the Amazon S3 object is stored. + /// + /// The upload response metadata. 
+ public TransferUtilityUploadResponse UploadWithResponse(Stream stream, string bucketName, string key) + { + try + { + return UploadWithResponseAsync(stream, bucketName, key).Result; + } + catch (AggregateException e) + { + ExceptionDispatchInfo.Capture(e.InnerException).Throw(); + return null; + } + } + + /// + /// Uploads the file or stream specified by the request and returns response metadata. + /// To track the progress of the upload, + /// add an event listener to the request's UploadProgressEvent. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. + /// + /// + /// + /// Contains all the parameters required to upload to Amazon S3. + /// + /// The upload response metadata. 
+ public TransferUtilityUploadResponse UploadWithResponse(TransferUtilityUploadRequest request) + { + try + { + return UploadWithResponseAsync(request).Result; + } + catch (AggregateException e) + { + ExceptionDispatchInfo.Capture(e.InnerException).Throw(); + return null; + } + } + #endregion #region OpenStream diff --git a/sdk/src/Services/S3/Custom/_async/AmazonS3Client.Extensions.cs b/sdk/src/Services/S3/Custom/_async/AmazonS3Client.Extensions.cs index c66db979c15f..c5ec54b919b0 100644 --- a/sdk/src/Services/S3/Custom/_async/AmazonS3Client.Extensions.cs +++ b/sdk/src/Services/S3/Custom/_async/AmazonS3Client.Extensions.cs @@ -82,7 +82,6 @@ Task ICoreAmazonS3.DeletesAsync(string bucketName, IEnumerable objectKey Task ICoreAmazonS3.UploadObjectFromStreamAsync(string bucketName, string objectKey, Stream stream, IDictionary additionalProperties, CancellationToken cancellationToken) { - var transfer = new Amazon.S3.Transfer.TransferUtility(this); var request = new Amazon.S3.Transfer.TransferUtilityUploadRequest { BucketName = bucketName, @@ -90,7 +89,7 @@ Task ICoreAmazonS3.UploadObjectFromStreamAsync(string bucketName, string objectK InputStream = stream }; InternalSDKUtils.ApplyValuesV2(request, additionalProperties); - return transfer.UploadAsync(request, cancellationToken); + return UploadObjectInternalAsync(request, cancellationToken); } async Task ICoreAmazonS3.GetObjectStreamAsync(string bucketName, string objectKey, IDictionary additionalProperties, CancellationToken cancellationToken) @@ -107,7 +106,6 @@ async Task ICoreAmazonS3.GetObjectStreamAsync(string bucketName, string Task ICoreAmazonS3.UploadObjectFromFilePathAsync(string bucketName, string objectKey, string filepath, IDictionary additionalProperties, CancellationToken cancellationToken) { - var transfer = new Amazon.S3.Transfer.TransferUtility(this); var request = new Amazon.S3.Transfer.TransferUtilityUploadRequest { BucketName = bucketName, @@ -115,8 +113,7 @@ Task 
ICoreAmazonS3.UploadObjectFromFilePathAsync(string bucketName, string objec FilePath = filepath }; InternalSDKUtils.ApplyValuesV2(request, additionalProperties); - - return transfer.UploadAsync(request, cancellationToken); + return UploadObjectInternalAsync(request, cancellationToken); } Task ICoreAmazonS3.DownloadToFilePathAsync(string bucketName, string objectKey, string filepath, IDictionary additionalProperties, CancellationToken cancellationToken) @@ -156,6 +153,15 @@ async Task ICoreAmazonS3.EnsureBucketExistsAsync(string bucketName) } } #endregion + + #region Internal Helper Methods + + private async Task UploadObjectInternalAsync(Amazon.S3.Transfer.TransferUtilityUploadRequest request, CancellationToken cancellationToken) + { + var transfer = new Amazon.S3.Transfer.TransferUtility(this); + await transfer.UploadWithResponseAsync(request, cancellationToken).ConfigureAwait(false); + } + + #endregion } } - diff --git a/sdk/src/Services/S3/Custom/_bcl/AmazonS3Client.Extensions.cs b/sdk/src/Services/S3/Custom/_bcl/AmazonS3Client.Extensions.cs index 36c668266672..ef543c9cafa2 100644 --- a/sdk/src/Services/S3/Custom/_bcl/AmazonS3Client.Extensions.cs +++ b/sdk/src/Services/S3/Custom/_bcl/AmazonS3Client.Extensions.cs @@ -89,7 +89,7 @@ void ICoreAmazonS3.UploadObjectFromStream(string bucketName, string objectKey, S InputStream = stream }; InternalSDKUtils.ApplyValuesV2(request, additionalProperties); - transfer.Upload(request); + transfer.UploadWithResponse(request); } void ICoreAmazonS3.UploadObjectFromFilePath(string bucketName, string objectKey, string filepath, IDictionary additionalProperties) @@ -102,7 +102,7 @@ void ICoreAmazonS3.UploadObjectFromFilePath(string bucketName, string objectKey, FilePath = filepath }; InternalSDKUtils.ApplyValuesV2(request, additionalProperties); - transfer.Upload(request); + transfer.UploadWithResponse(request); } void ICoreAmazonS3.DownloadToFilePath(string bucketName, string objectKey, string filepath, IDictionary 
additionalProperties) diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs index 6aaa7ae3a5e3..967f07d4884f 100644 --- a/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs +++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs @@ -13,6 +13,7 @@ using Amazon.Util; using System.Net.Mime; using System.Runtime.InteropServices.ComTypes; +using System.Threading.Tasks; namespace AWSSDK_DotNet.IntegrationTests.Tests.S3 { @@ -1433,6 +1434,248 @@ public void TestMultipartUploadWithSetContentTypeNotOverwritten() Assert.IsTrue(metadata.Headers.ContentType.Equals(MediaTypeNames.Text.Plain)); } + [TestMethod] + [TestCategory("S3")] + public async Task UploadWithResponseAsyncSmallFileTest() + { + var fileName = UtilityMethods.GenerateName(@"UploadWithResponseTest\SmallFile"); + var path = Path.Combine(BasePath, fileName); + var fileSize = 1 * MEG_SIZE; // Small file for single-part upload + UtilityMethods.GenerateFile(path, fileSize); + + using (var transferUtility = new TransferUtility(Client)) + { + var request = new TransferUtilityUploadRequest + { + BucketName = bucketName, + FilePath = path, + Key = fileName, + ContentType = octetStreamContentType + }; + + var response = await transferUtility.UploadWithResponseAsync(request); + + // Validate response object is not null + Assert.IsNotNull(response, "Response should not be null"); + + // Validate essential response fields that should always be present + Assert.IsNotNull(response.ETag, "ETag should not be null"); + Assert.IsTrue(response.ETag.Length > 0, "ETag should not be empty"); + + // For small files, we expect single-part upload behavior - ETag should be MD5 format (no quotes or dashes) + // ETag format varies, so we just ensure it's a valid non-empty string + Console.WriteLine($"ETag: {response.ETag}"); + Console.WriteLine($"VersionId: {response.VersionId}"); + + // Validate file was actually uploaded by checking 
metadata + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = fileName + }); + Assert.AreEqual(fileSize, metadata.ContentLength, "Uploaded file size should match original"); + Assert.AreEqual(response.ETag, metadata.ETag, "ETag from response should match object metadata"); + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadWithResponseAsyncLargeFileTest() + { + var fileName = UtilityMethods.GenerateName(@"UploadWithResponseTest\LargeFile"); + var path = Path.Combine(BasePath, fileName); + var fileSize = 20 * MEG_SIZE; // Large file for multipart upload + UtilityMethods.GenerateFile(path, fileSize); + + using (var transferUtility = new TransferUtility(Client)) + { + var request = new TransferUtilityUploadRequest + { + BucketName = bucketName, + FilePath = path, + Key = fileName, + ContentType = octetStreamContentType + }; + + var response = await transferUtility.UploadWithResponseAsync(request); + + // Validate response object is not null + Assert.IsNotNull(response, "Response should not be null"); + + // Validate essential response fields that should always be present + Assert.IsNotNull(response.ETag, "ETag should not be null"); + Assert.IsTrue(response.ETag.Length > 0, "ETag should not be empty"); + + // For multipart uploads, ETag format is different (contains dashes) + // We just validate it's a valid string for now + Console.WriteLine($"ETag (multipart): {response.ETag}"); + Console.WriteLine($"VersionId: {response.VersionId}"); + + // Validate file was actually uploaded by checking metadata + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = fileName + }); + Assert.AreEqual(fileSize, metadata.ContentLength, "Uploaded file size should match original"); + Assert.AreEqual(response.ETag, metadata.ETag, "ETag from response should match object metadata"); + } + } + + [TestMethod] + [TestCategory("S3")] + 
public async Task UploadWithResponseAsyncStreamTest() + { + var fileName = UtilityMethods.GenerateName(@"UploadWithResponseTest\StreamFile"); + var path = Path.Combine(BasePath, fileName); + var fileSize = 5 * MEG_SIZE; + UtilityMethods.GenerateFile(path, fileSize); + + using (var transferUtility = new TransferUtility(Client)) + using (var fileStream = File.OpenRead(path)) + { + var request = new TransferUtilityUploadRequest + { + BucketName = bucketName, + InputStream = fileStream, + Key = fileName, + ContentType = octetStreamContentType + }; + + var response = await transferUtility.UploadWithResponseAsync(request); + + // Validate response object is not null + Assert.IsNotNull(response, "Response should not be null"); + + // Validate essential response fields that should always be present + Assert.IsNotNull(response.ETag, "ETag should not be null"); + Assert.IsTrue(response.ETag.Length > 0, "ETag should not be empty"); + + Console.WriteLine($"ETag (stream): {response.ETag}"); + Console.WriteLine($"VersionId: {response.VersionId}"); + + // Validate file was actually streamed and uploaded correctly + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = fileName + }); + Assert.AreEqual(fileSize, metadata.ContentLength, "Uploaded stream size should match original"); + Assert.AreEqual(response.ETag, metadata.ETag, "ETag from response should match object metadata"); + + // Validate content by downloading and comparing + var downloadPath = path + ".download"; + await transferUtility.DownloadAsync(new TransferUtilityDownloadRequest + { + BucketName = bucketName, + Key = fileName, + FilePath = downloadPath + }); + UtilityMethods.CompareFiles(path, downloadPath); + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadWithResponseAsyncWithChecksumTest() + { + var fileName = UtilityMethods.GenerateName(@"UploadWithResponseTest\ChecksumFile"); + var path = Path.Combine(BasePath, fileName); + 
var fileSize = 2 * MEG_SIZE; + UtilityMethods.GenerateFile(path, fileSize); + + // Calculate checksum for the file + var fileBytes = File.ReadAllBytes(path); + var precalculatedChecksum = CryptoUtilFactory.CryptoInstance.ComputeCRC32Hash(fileBytes); + + using (var transferUtility = new TransferUtility(Client)) + { + var request = new TransferUtilityUploadRequest + { + BucketName = bucketName, + FilePath = path, + Key = fileName, + ContentType = octetStreamContentType, + ChecksumCRC32 = precalculatedChecksum + }; + + var response = await transferUtility.UploadWithResponseAsync(request); + + // Validate response object is not null + Assert.IsNotNull(response, "Response should not be null"); + + // Validate essential response fields + Assert.IsNotNull(response.ETag, "ETag should not be null"); + Assert.IsTrue(response.ETag.Length > 0, "ETag should not be empty"); + + // Validate checksum fields if they should be present + // Note: Checksum fields in response may not always be set depending on S3 behavior + Console.WriteLine($"ETag: {response.ETag}"); + Console.WriteLine($"ChecksumCRC32: {response.ChecksumCRC32}"); + Console.WriteLine($"ChecksumType: {response.ChecksumType}"); + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadWithResponseAsyncCompareWithLegacyUploadTest() + { + var fileName = UtilityMethods.GenerateName(@"UploadWithResponseTest\CompareFile"); + var path = Path.Combine(BasePath, fileName); + var fileSize = 8 * MEG_SIZE; + UtilityMethods.GenerateFile(path, fileSize); + + using (var transferUtility = new TransferUtility(Client)) + { + // Test the new UploadWithResponseAsync method + var responseRequest = new TransferUtilityUploadRequest + { + BucketName = bucketName, + FilePath = path, + Key = fileName + "-with-response", + ContentType = octetStreamContentType + }; + + var response = await transferUtility.UploadWithResponseAsync(responseRequest); + + // Test the legacy Upload method for comparison + var legacyRequest = new 
TransferUtilityUploadRequest + { + BucketName = bucketName, + FilePath = path, + Key = fileName + "-legacy", + ContentType = octetStreamContentType + }; + + await transferUtility.UploadAsync(legacyRequest); + + // Validate that both uploads resulted in the same file being uploaded + var responseMetadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = fileName + "-with-response" + }); + + var legacyMetadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = fileName + "-legacy" + }); + + // Both should have the same file size and content type + Assert.AreEqual(responseMetadata.ContentLength, legacyMetadata.ContentLength, "File sizes should match"); + Assert.AreEqual(responseMetadata.Headers.ContentType, legacyMetadata.Headers.ContentType, "Content types should match"); + + // Validate the response contains the expected ETag + Assert.IsNotNull(response.ETag, "Response ETag should not be null"); + Assert.AreEqual(response.ETag, responseMetadata.ETag, "Response ETag should match metadata ETag"); + + Console.WriteLine($"UploadWithResponseAsync ETag: {response.ETag}"); + Console.WriteLine($"Legacy upload ETag: {legacyMetadata.ETag}"); + Console.WriteLine($"File size: {fileSize}, Response metadata size: {responseMetadata.ContentLength}"); + } + } + #if ASYNC_AWAIT [TestMethod] diff --git a/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json b/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json index 63216442578e..6e08ac4a05d2 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json +++ b/sdk/test/Services/S3/UnitTests/Custom/EmbeddedResource/property-aliases.json @@ -132,6 +132,13 @@ "SSEKMSKeyId": "ServerSideEncryptionKeyManagementServiceKeyId", "ServerSideEncryption": "ServerSideEncryptionMethod", "Restore": "RestoreExpiration" + }, + "TransferUtilityOpenStreamResponse": { + 
"SSECustomerAlgorithm": "ServerSideEncryptionCustomerMethod", + "SSECustomerKeyMD5": "ServerSideEncryptionCustomerProvidedKeyMD5", + "SSEKMSKeyId": "ServerSideEncryptionKeyManagementServiceKeyId", + "ServerSideEncryption": "ServerSideEncryptionMethod", + "Restore": "RestoreExpiration" } } } \ No newline at end of file diff --git a/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs b/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs index 5f332443d0a5..acaf3c0084e8 100644 --- a/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs +++ b/sdk/test/Services/S3/UnitTests/Custom/ResponseMapperTests.cs @@ -149,8 +149,7 @@ public void MapPutObjectResponse_AllMappedProperties_WorkCorrectly() }, (sourceResponse, targetResponse) => { - Assert.AreEqual(sourceResponse.HttpStatusCode, targetResponse.HttpStatusCode, "HttpStatusCode should match"); - Assert.AreEqual(sourceResponse.ContentLength, targetResponse.ContentLength, "ContentLength should match"); + }); } @@ -524,12 +523,11 @@ public void MapCompleteMultipartUploadResponse_AllMappedProperties_WorkCorrectly (sourceResponse) => { sourceResponse.HttpStatusCode = HttpStatusCode.OK; - sourceResponse.ContentLength = 2048; + sourceResponse.ContentLength = 1024; }, (sourceResponse, targetResponse) => { - Assert.AreEqual(sourceResponse.HttpStatusCode, targetResponse.HttpStatusCode, "HttpStatusCode should match"); - Assert.AreEqual(sourceResponse.ContentLength, targetResponse.ContentLength, "ContentLength should match"); + }); } @@ -600,12 +598,11 @@ public void MapGetObjectResponse_AllMappedProperties_WorkCorrectly() (sourceResponse) => { sourceResponse.HttpStatusCode = HttpStatusCode.OK; - sourceResponse.ContentLength = 2048; + sourceResponse.ContentLength = 1024; }, (sourceResponse, targetResponse) => { - Assert.AreEqual(sourceResponse.HttpStatusCode, targetResponse.HttpStatusCode, "HttpStatusCode should match"); - Assert.AreEqual(sourceResponse.ContentLength, targetResponse.ContentLength, 
"ContentLength should match"); + }); } @@ -687,6 +684,204 @@ public void ValidateTransferUtilityUploadRequestDefinitionCompleteness() }); } + [TestMethod] + [TestCategory("S3")] + public void MapGetObjectResponseToOpenStream_AllMappedProperties_WorkCorrectly() + { + ValidateMappingTransferUtilityAndSdkRequests( + new[] { "Conversion", "GetObjectResponse", "DownloadResponse" }, + (sourceResponse) => + { + return ResponseMapper.MapGetObjectResponseToOpenStream(sourceResponse); + }, + usesHeadersCollection: true, + (sourceResponse) => + { + sourceResponse.HttpStatusCode = HttpStatusCode.OK; + sourceResponse.ContentLength = 1024; + sourceResponse.ResponseStream = new MemoryStream(new byte[1024]); + }, + (sourceResponse, targetResponse) => + { + Assert.AreSame(sourceResponse.ResponseStream, targetResponse.ResponseStream, "ResponseStream should be the same instance"); + }); + } + + [TestMethod] + [TestCategory("S3")] + public void MapGetObjectResponseToOpenStream_NullValues_HandledCorrectly() + { + // Test null handling scenarios + var testCases = new[] + { + // Test null Expiration + new GetObjectResponse { Expiration = null }, + + // Test null enum conversions + new GetObjectResponse { ChecksumType = null, RequestCharged = null, ServerSideEncryptionMethod = null }, + + // Test null ResponseStream + new GetObjectResponse { ResponseStream = null } + }; + + foreach (var testCase in testCases) + { + var mapped = ResponseMapper.MapGetObjectResponseToOpenStream(testCase); + Assert.IsNotNull(mapped, "Response should always be mappable"); + + // Test null handling + if (testCase.Expiration == null) + { + Assert.IsNull(mapped.Expiration, "Null Expiration should map to null"); + } + + if (testCase.ResponseStream == null) + { + Assert.IsNull(mapped.ResponseStream, "Null ResponseStream should map to null"); + } + } + } + + [TestMethod] + [TestCategory("S3")] + public void MapGetObjectResponseToOpenStream_ResponseStream_HandledCorrectly() + { + // Test with actual stream + var 
testStream = new MemoryStream(new byte[] { 1, 2, 3, 4, 5 }); + var sourceResponse = new GetObjectResponse + { + ResponseStream = testStream, + ETag = "test-etag", + Headers = { ContentLength = 5 } + }; + + var mappedResponse = ResponseMapper.MapGetObjectResponseToOpenStream(sourceResponse); + + Assert.IsNotNull(mappedResponse, "Mapped response should not be null"); + Assert.AreSame(testStream, mappedResponse.ResponseStream, "ResponseStream should be the same instance"); + Assert.AreEqual("test-etag", mappedResponse.ETag, "Other properties should also be mapped"); + Assert.AreEqual(5, mappedResponse.Headers.ContentLength, "ContentLength should be mapped"); + + // Test with null stream + var sourceWithNullStream = new GetObjectResponse + { + ResponseStream = null, + ETag = "test-etag-2" + }; + + var mappedWithNullStream = ResponseMapper.MapGetObjectResponseToOpenStream(sourceWithNullStream); + + Assert.IsNotNull(mappedWithNullStream, "Mapped response should not be null even with null stream"); + Assert.IsNull(mappedWithNullStream.ResponseStream, "ResponseStream should be null when source is null"); + Assert.AreEqual("test-etag-2", mappedWithNullStream.ETag, "Other properties should still be mapped"); + } + + [TestMethod] + [TestCategory("S3")] + public void MapGetObjectResponseToOpenStream_NullSource_ThrowsArgumentNullException() + { + Assert.ThrowsException(() => + ResponseMapper.MapGetObjectResponseToOpenStream(null), + "Mapping null source should throw ArgumentNullException"); + } + + [TestMethod] + [TestCategory("S3")] + public void TransferUtilityOpenStreamResponse_Dispose_DisposesResponseStream() + { + // Arrange + var memoryStream = new MemoryStream(new byte[] { 1, 2, 3, 4, 5 }); + var response = new TransferUtilityOpenStreamResponse + { + ResponseStream = memoryStream, + ETag = "test-etag" + }; + + // Act + response.Dispose(); + + // Assert - accessing disposed stream should throw ObjectDisposedException + Assert.ThrowsException(() => _ = memoryStream.Length, 
+ "Accessing Length of disposed stream should throw ObjectDisposedException"); + Assert.ThrowsException(() => _ = memoryStream.Position, + "Accessing Position of disposed stream should throw ObjectDisposedException"); + Assert.ThrowsException(() => memoryStream.Read(new byte[1], 0, 1), + "Reading from disposed stream should throw ObjectDisposedException"); + Assert.IsNull(response.ResponseStream, "ResponseStream should be null after disposal"); + } + + [TestMethod] + [TestCategory("S3")] + public void TransferUtilityOpenStreamResponse_Dispose_MultipleCallsSafe() + { + // Arrange + var memoryStream = new MemoryStream(new byte[] { 1, 2, 3, 4, 5 }); + var response = new TransferUtilityOpenStreamResponse + { + ResponseStream = memoryStream + }; + + // Act - call dispose multiple times + response.Dispose(); + response.Dispose(); // Second call should not throw + + // Assert - stream should still be disposed after multiple dispose calls + Assert.ThrowsException(() => _ = memoryStream.Length, + "Stream should remain disposed after multiple dispose calls"); + Assert.ThrowsException(() => memoryStream.Read(new byte[1], 0, 1), + "Stream should remain disposed after multiple dispose calls"); + Assert.IsNull(response.ResponseStream, "ResponseStream should remain null after multiple dispose calls"); + } + + [TestMethod] + [TestCategory("S3")] + public void TransferUtilityOpenStreamResponse_Dispose_NullStreamSafe() + { + // Arrange + var response = new TransferUtilityOpenStreamResponse + { + ResponseStream = null, + ETag = "test-etag" + }; + + // Act & Assert - should not throw + response.Dispose(); + Assert.IsNull(response.ResponseStream, "ResponseStream should remain null"); + } + + [TestMethod] + [TestCategory("S3")] + public void TransferUtilityOpenStreamResponse_UsingStatement_DisposesCorrectly() + { + // Arrange + var memoryStream = new MemoryStream(new byte[] { 1, 2, 3, 4, 5 }); + MemoryStream capturedStream = null; + + // Act + using (var response = new 
TransferUtilityOpenStreamResponse()) + { + response.ResponseStream = memoryStream; + response.ETag = "test-etag"; + capturedStream = memoryStream; + } // Dispose should be called here + + // Assert - stream should be disposed after using block + Assert.ThrowsException(() => _ = capturedStream.Length, + "Stream should be disposed after using block"); + Assert.ThrowsException(() => capturedStream.Read(new byte[1], 0, 1), + "Stream should be disposed after using block"); + } + + [TestMethod] + [TestCategory("S3")] + public void TransferUtilityOpenStreamResponse_ImplementsIDisposable() + { + // Assert + Assert.IsTrue(typeof(IDisposable).IsAssignableFrom(typeof(TransferUtilityOpenStreamResponse)), + "TransferUtilityOpenStreamResponse should implement IDisposable"); + } + + /// /// Generates appropriate test data for a given property type ///