diff options
-rw-r--r-- | GNUmakefile | 8 | ||||
-rw-r--r-- | GNUmakefile.mingw | 7 | ||||
-rw-r--r-- | TODO | 6 | ||||
-rw-r--r-- | doxyfile | 2 | ||||
-rw-r--r-- | inc/libs3.h | 215 | ||||
-rw-r--r-- | inc/request.h | 20 | ||||
-rw-r--r-- | inc/string_buffer.h | 4 | ||||
-rw-r--r-- | inc/util.h | 4 | ||||
-rw-r--r-- | mswin/libs3.def | 3 | ||||
-rw-r--r-- | src/acl.c | 53 | ||||
-rw-r--r-- | src/bucket.c | 62 | ||||
-rw-r--r-- | src/general.c | 48 | ||||
-rw-r--r-- | src/mingw_functions.c | 2 | ||||
-rw-r--r-- | src/mingw_s3_functions.c | 2 | ||||
-rw-r--r-- | src/object.c | 56 | ||||
-rw-r--r-- | src/request.c | 251 | ||||
-rw-r--r-- | src/request_context.c | 40 | ||||
-rw-r--r-- | src/response_headers_handler.c | 6 | ||||
-rw-r--r-- | src/s3.c | 564 | ||||
-rw-r--r-- | src/service.c | 10 | ||||
-rw-r--r-- | src/service_access_logging.c | 556 | ||||
-rw-r--r-- | src/simplexml.c | 10 | ||||
-rw-r--r-- | src/testsimplexml.c | 2 | ||||
-rw-r--r-- | src/util.c | 8 |
24 files changed, 1594 insertions, 345 deletions
diff --git a/GNUmakefile b/GNUmakefile index 3f8f86b..5b7c1a5 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -86,7 +86,7 @@ ifndef CFLAGS CFLAGS = -O3 endif -CFLAGS += -Wall -Werror -std=c99 -Iinc \ +CFLAGS += -Wall -Werror -Wshadow -Wextra -std=c99 -Iinc \ $(CURL_CFLAGS) $(LIBXML2_CFLAGS) \ -DLIBS3_VER_MAJOR=\"$(LIBS3_VER_MAJOR)\" \ -DLIBS3_VER_MINOR=\"$(LIBS3_VER_MINOR)\" \ @@ -121,6 +121,7 @@ install: libs3 s3 headers install -Dps -m u+rw,go+r $(BUILD)/lib/libs3.so.$(LIBS3_VER_MAJOR) \ $(DESTDIR)/lib/libs3.so.$(LIBS3_VER) ln -sf libs3.so.$(LIBS3_VER) $(DESTDIR)/lib/libs3.so.$(LIBS3_VER_MAJOR) + ln -sf libs3.so.$(LIBS3_VER_MAJOR) $(DESTDIR)/lib/libs3.so # -------------------------------------------------------------------------- @@ -131,6 +132,7 @@ uninstall: rm -f $(DESTDIR)/bin/s3 \ $(DESTDIR)/include/libs3.h \ $(DESTDIR)/lib/libs3.a \ + $(DESTDIR)/lib/libs3.so \ $(DESTDIR)/lib/libs3.so.$(LIBS3_VER_MAJOR) \ $(DESTDIR)/lib/libs3.so.$(LIBS3_VER) \ @@ -249,8 +251,8 @@ libs3: $(LIBS3_SHARED) $(LIBS3_SHARED_MAJOR) $(BUILD)/lib/libs3.a LIBS3_SOURCES := src/acl.c src/bucket.c src/error_parser.c src/general.c \ src/object.c src/request.c src/request_context.c \ - src/response_headers_handler.c src/service.c \ - src/simplexml.c src/util.c + src/response_headers_handler.c src/service_access_logging.c \ + src/service.c src/simplexml.c src/util.c $(LIBS3_SHARED): $(LIBS3_SOURCES:src/%.c=$(BUILD)/obj/%.do) @mkdir -p $(dir $@) diff --git a/GNUmakefile.mingw b/GNUmakefile.mingw index ab223bb..0dff51c 100644 --- a/GNUmakefile.mingw +++ b/GNUmakefile.mingw @@ -86,7 +86,8 @@ ifndef CFLAGS CFLAGS = -O3 endif -CFLAGS += -Wall -Werror -std=c99 -Iinc $(CURL_CFLAGS) $(LIBXML2_CFLAGS) \ +CFLAGS += -Wall -Werror -Wshadow -Wextra -std=c99 -Iinc \ + $(CURL_CFLAGS) $(LIBXML2_CFLAGS) \ -DLIBS3_VER_MAJOR=\"$(LIBS3_VER_MAJOR)\" \ -DLIBS3_VER_MINOR=\"$(LIBS3_VER_MINOR)\" \ -Dsleep=Sleep -DFOPEN_EXTRA_FLAGS=\"b\" \ @@ -143,8 +144,8 @@ libs3: $(LIBS3_SHARED) $(BUILD)/lib/libs3.a LIBS3_SOURCES := 
src/acl.c src/bucket.c src/error_parser.c src/general.c \ src/object.c src/request.c src/request_context.c \ - src/response_headers_handler.c src/service.c \ - src/simplexml.c src/util.c src/mingw_functions.c + src/response_headers_handler.c src/service_access_logging.c \ + src/service.c src/simplexml.c src/util.c src/mingw_functions.c $(LIBS3_SHARED): $(LIBS3_SOURCES:src/%.c=$(BUILD)/obj/%.o) -@mkdir $(subst /,\,$(dir $@)) @@ -1,9 +1,3 @@ -* Write test case for request_context - -* Implement service logging support - -* Implement function for generating an HTTP authenticated query string - * Implement functions for generating form stuff for posting to s3 * Write s3 man page @@ -23,7 +23,7 @@ PROJECT_NAME = libs3 # This could be handy for archiving the generated documentation or # if some version control system is used. -PROJECT_NUMBER = 0.2 +PROJECT_NUMBER = trunk # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. diff --git a/inc/libs3.h b/inc/libs3.h index 0605e7b..89fbcc4 100644 --- a/inc/libs3.h +++ b/inc/libs3.h @@ -28,7 +28,7 @@ #define LIBS3_H #include <stdint.h> -#include <sys/time.h> +#include <sys/select.h> #ifdef __cplusplus @@ -129,6 +129,12 @@ extern "C" { /** + * S3_MAX_BUCKET_NAME_SIZE is the maximum size of a bucket name. + **/ + +#define S3_MAX_BUCKET_NAME_SIZE 255 + +/** * S3_MAX_KEY_SIZE is the maximum size of keys that Amazon S3 supports. 
**/ #define S3_MAX_KEY_SIZE 1024 @@ -188,6 +194,17 @@ extern "C" { /** + * This is the maximum number of characters that will be stored in the + * return buffer for the utility function which computes an HTTP authenticated + * query string + **/ +#define S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE \ + (sizeof("https://" S3_HOSTNAME "/") + (S3_MAX_KEY_SIZE * 3) + \ + sizeof("?AWSAccessKeyId=") + 32 + sizeof("&Expires=") + 32 + \ + sizeof("&Signature=") + 28 + 1) + + +/** * This constant is used by the S3_initialize() function, to specify that * the winsock library should be initialized by libs3; only relevent on * Microsoft Windows platforms. @@ -250,15 +267,17 @@ typedef enum S3StatusKeyTooLong , S3StatusUriTooLong , S3StatusXmlParseFailure , - S3StatusBadAclEmailAddressTooLong , - S3StatusBadAclUserIdTooLong , - S3StatusBadAclUserDisplayNameTooLong , - S3StatusBadAclGroupUriTooLong , - S3StatusBadAclPermissionTooLong , - S3StatusTooManyAclGrants , - S3StatusBadAclGrantee , - S3StatusBadAclPermission , - S3StatusAclXmlDocumentTooLarge , + S3StatusEmailAddressTooLong , + S3StatusUserIdTooLong , + S3StatusUserDisplayNameTooLong , + S3StatusGroupUriTooLong , + S3StatusPermissionTooLong , + S3StatusTargetBucketTooLong , + S3StatusTargetPrefixTooLong , + S3StatusTooManyGrants , + S3StatusBadGrantee , + S3StatusBadPermission , + S3StatusXmlDocumentTooLarge , S3StatusNameLookupError , S3StatusFailedToConnect , S3StatusServerFailedVerification , @@ -391,13 +410,16 @@ typedef enum * listing owned buckets * All AWS Users - identifies all authenticated AWS users * All Users - identifies all users + * Log Delivery - identifies the Amazon group responsible for writing + * server access logs into buckets **/ typedef enum { S3GranteeTypeAmazonCustomerByEmail = 0, S3GranteeTypeCanonicalUser = 1, S3GranteeTypeAllAwsUsers = 2, - S3GranteeTypeAllUsers = 3 + S3GranteeTypeAllUsers = 3, + S3GranteeTypeLogDelivery = 4 } S3GranteeType; @@ -534,7 +556,7 @@ typedef struct S3ResponseProperties * 
of seconds since the UNIX epoch. * **/ - time_t lastModified; + int64_t lastModified; /** * This is the number of user-provided meta data associated with the @@ -662,7 +684,7 @@ typedef struct S3ListBucketContent * This is the number of seconds since UNIX epoch of the last modified * date of the object identified by the key. **/ - time_t lastModified; + int64_t lastModified; /** * This gives a tag which gives a signature of the contents of the object, @@ -705,7 +727,7 @@ typedef struct S3PutProperties /** * If present, this provides the MD5 signature of the contents, and is * used to validate the contents. This is highly recommended by Amazon - * but not required. + * but not required. Its format is as a base64-encoded MD5 sum. **/ const char *md5; @@ -736,7 +758,7 @@ typedef struct S3PutProperties * information is typically only delivered to users who download the * content via a web browser. **/ - time_t expires; + int64_t expires; /** * This identifies the "canned ACL" that should be used for this object. @@ -770,7 +792,7 @@ typedef struct S3GetConditions * seconds since Unix epoch. If this value is less than zero, it will not * be used in the conditional. **/ - time_t ifModifiedSince; + int64_t ifModifiedSince; /** * The request will be processed if the Last-Modification header of the @@ -778,7 +800,7 @@ typedef struct S3GetConditions * Unix epoch. If this value is less than zero, it will not be used in * the conditional. **/ - time_t ifNotModifiedSince; + int64_t ifNotModifiedSince; /** * If non-NULL, this gives an eTag header value which the object must @@ -900,13 +922,15 @@ typedef void (S3ResponseCompleteCallback)(S3Status status, typedef S3Status (S3ListServiceCallback)(const char *ownerId, const char *ownerDisplayName, const char *bucketName, - time_t creationDateSeconds, + int64_t creationDateSeconds, void *callbackData); /** - * This callback is made once for each object resulting from a list bucket - * operation. 
+ * This callback is made repeatedly as a list bucket operation progresses. + * The contents reported via this callback are only reported once per list + * bucket operation, but multiple calls to this callback may be necessary to + * report all items resulting from the list bucket operation. * * @param isTruncated is true if the list bucket request was truncated by the * S3 service, in which case the remainder of the list may be obtained @@ -1045,7 +1069,9 @@ typedef struct S3ListBucketHandler /** * The listBucketCallback is called as items are reported back from S3 as - * responses to the request + * responses to the request. This may be called more than one time per + * list bucket request, each time providing more items from the list + * operation. **/ S3ListBucketCallback *listBucketCallback; } S3ListBucketHandler; @@ -1344,6 +1370,57 @@ S3Status S3_get_request_context_fdsets(S3RequestContext *requestContext, fd_set *exceptFdSet, int *maxFd); +/** + * This function returns the maximum number of milliseconds that the caller of + * S3_runonce_request_context should wait on the fdsets obtained via a call to + * S3_get_request_context_fdsets. In other words, this is essentially the + * select() timeout that needs to be used (shorter values are OK, but no + * longer than this) to ensure that internal timeout code of libs3 can work + * properly. This function should be called right before select() each time + * select() on the request_context fdsets are to be performed by the libs3 + * user. + * + * @param requestContext is the S3RequestContext to get the timeout from + * @return the maximum number of milliseconds to select() on fdsets. Callers + * could wait a shorter time if they wish, but not longer. 
+ **/ +int64_t S3_get_request_context_timeout(S3RequestContext *requestContext); + + +/** ************************************************************************** + * S3 Utility Functions + ************************************************************************** **/ + +/** + * Generates an HTTP authenticated query string, which may then be used by + * a browser (or other web client) to issue the request. The request is + * implicitly a GET request; Amazon S3 is documented to only support this type + * of authenticated query string request. + * + * @param buffer is the output buffer for the authenticated query string. + * It must be at least S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE bytes in + * length. + * @param bucketContext gives the bucket and associated parameters for the + * request to generate. + * @param key gives the key which the authenticated request will GET. + * @param expires gives the number of seconds since Unix epoch for the + * expiration date of the request; after this time, the request will + * no longer be valid. If this value is negative, the largest + * expiration date possible is used (currently, Jan 19, 2038). + * @param resource gives a sub-resource to be fetched for the request, or NULL + * for none. This should be of the form "?<resource>", i.e. + * "?torrent". 
+ * @return One of: + * S3StatusUriTooLong if, due to an internal error, the generated URI + * is longer than S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE bytes in + * length and thus will not fit into the supplied buffer + * S3StatusOK on success + **/ +S3Status S3_generate_authenticated_query_string + (char *buffer, const S3BucketContext *bucketContext, + const char *key, int64_t expires, const char *resource); + + /** ************************************************************************** * Service Functions ************************************************************************** **/ @@ -1370,7 +1447,7 @@ void S3_list_service(S3Protocol protocol, const char *accessKeyId, const S3ListServiceHandler *handler, void *callbackData); - + /** ************************************************************************** * Bucket Functions ************************************************************************** **/ @@ -1559,7 +1636,7 @@ void S3_copy_object(const S3BucketContext *bucketContext, const char *key, const char *destinationBucket, const char *destinationKey, const S3PutProperties *putProperties, - time_t *lastModifiedReturn, int eTagReturnSize, + int64_t *lastModifiedReturn, int eTagReturnSize, char *eTagReturn, S3RequestContext *requestContext, const S3ResponseHandler *handler, void *callbackData); @@ -1698,23 +1775,93 @@ void S3_set_acl(const S3BucketContext *bucketContext, const char *key, const S3ResponseHandler *handler, void *callbackData); -/** - * xxx todo - * Service Logging ... - **/ - +/** ************************************************************************** + * Server Access Log Functions + ************************************************************************** **/ /** - * xxx todo - * function for generating an HTTP authenticated query string + * Gets the service access logging settings for a bucket. 
The service access + * logging settings specify whether or not the S3 service will write service + * access logs for requests made for the given bucket, and if so, several + * settings controlling how these logs will be written. + * + * @param bucketContext gives the bucket and associated parameters for this + * request; this is the bucket for which service access logging is + * being requested + * @param targetBucketReturn must be passed in as a buffer of at least + * (S3_MAX_BUCKET_NAME_SIZE + 1) bytes in length, and will be filled + * in with the target bucket name for access logging for the given + * bucket, which is the bucket into which access logs for the specified + * bucket will be written. This is returned as an empty string if + * service access logging is not enabled for the given bucket. + * @param targetPrefixReturn must be passed in as a buffer of at least + * (S3_MAX_KEY_SIZE + 1) bytes in length, and will be filled in + * with the key prefix for server access logs for the given bucket, + * or the empty string if no such prefix is specified. + * @param aclGrantCountReturn returns the number of ACL grants that are + * associated with the server access logging for the given bucket. + * @param aclGrants must be passed in as an array of at least + * S3_MAX_ACL_GRANT_COUNT S3AclGrant structures, and these will be + * filled in with the target grants associated with the server access + * logging for the given bucket, whose number is returned in the + * aclGrantCountReturn parameter. These grants will be applied to the + * ACL of any server access logging log files generated by the S3 + * service for the given bucket. + * @param requestContext if non-NULL, gives the S3RequestContext to add this + * request to, and does not perform the request immediately. If NULL, + * performs the request immediately and synchronously. 
+ * @param handler gives the callbacks to call as the request is processed and + * completed + * @param callbackData will be passed in as the callbackData parameter to + * all callbacks for this request **/ - +void S3_get_server_access_logging(const S3BucketContext *bucketContext, + char *targetBucketReturn, + char *targetPrefixReturn, + int *aclGrantCountReturn, + S3AclGrant *aclGrants, + S3RequestContext *requestContext, + const S3ResponseHandler *handler, + void *callbackData); + /** - * xxx todo - * functions for generating form stuff for posting to s3 + * Sets the service access logging settings for a bucket. The service access + * logging settings specify whether or not the S3 service will write service + * access logs for requests made for the given bucket, and if so, several + * settings controlling how these logs will be written. + * + * @param bucketContext gives the bucket and associated parameters for this + * request; this is the bucket for which service access logging is + * being set + * @param targetBucket gives the target bucket name for access logging for the + * given bucket, which is the bucket into which access logs for the + * specified bucket will be written. + * @param targetPrefix is an option parameter which specifies the key prefix + * for server access logs for the given bucket, or NULL if no such + * prefix is to be used. + * @param aclGrantCount specifies the number of ACL grants that are to be + * associated with the server access logging for the given bucket. + * @param aclGrants is as an array of S3AclGrant structures, whose number is + * given by the aclGrantCount parameter. These grants will be applied + * to the ACL of any server access logging log files generated by the + * S3 service for the given bucket. + * @param requestContext if non-NULL, gives the S3RequestContext to add this + * request to, and does not perform the request immediately. If NULL, + * performs the request immediately and synchronously. 
+ * @param handler gives the callbacks to call as the request is processed and + * completed + * @param callbackData will be passed in as the callbackData parameter to + * all callbacks for this request **/ - +void S3_set_server_access_logging(const S3BucketContext *bucketContext, + const char *targetBucket, + const char *targetPrefix, int aclGrantCount, + const S3AclGrant *aclGrants, + S3RequestContext *requestContext, + const S3ResponseHandler *handler, + void *callbackData); + #ifdef __cplusplus } diff --git a/inc/request.h b/inc/request.h index 750e39a..afb4929 100644 --- a/inc/request.h +++ b/inc/request.h @@ -51,14 +51,8 @@ typedef struct RequestParams // Request type, affects the HTTP verb used HttpRequestType httpRequestType; - // Protocol to use for request - S3Protocol protocol; - - // URI style to use for request - S3UriStyle uriStyle; - - // Bucket name, if any - const char *bucketName; + // Bucket context for request + S3BucketContext bucketContext; // Key, if any const char *key; @@ -69,12 +63,6 @@ typedef struct RequestParams // sub resource, like ?acl, ?location, ?torrent, ?logging const char *subResource; - // AWS Access Key ID - const char *accessKeyId; - - // AWS Secret Access Key - const char *secretAccessKey; - // If this is a copy operation, this gives the source bucket const char *copySourceBucketName; @@ -148,8 +136,8 @@ typedef struct Request // Callback to be made to supply data to send to S3. Might not be called. S3PutObjectDataCallback *toS3Callback; - // Number of bytes total that readCallback will supply - int64_t toS3CallbackTotalSize; + // Number of bytes total that readCallback has left to supply + int64_t toS3CallbackBytesRemaining; // Callback to be made that supplies data read from S3. // Might not be called. 
diff --git a/inc/string_buffer.h b/inc/string_buffer.h index 472baca..eed9bd4 100644 --- a/inc/string_buffer.h +++ b/inc/string_buffer.h @@ -50,7 +50,7 @@ do { \ sb##Len += snprintf(&(sb[sb##Len]), sizeof(sb) - sb##Len - 1, \ "%.*s", (int) (len), str); \ - if (sb##Len > (sizeof(sb) - 1)) { \ + if (sb##Len > (int) (sizeof(sb) - 1)) { \ sb##Len = sizeof(sb) - 1; \ all_fit = 0; \ } \ @@ -84,7 +84,7 @@ smb##Size += (snprintf(&(smb[smb##Size]), \ sizeof(smb) - smb##Size, \ "%.*s", (int) (len), str) + 1); \ - if (smb##Size > sizeof(smb)) { \ + if (smb##Size > (int) sizeof(smb)) { \ smb##Size = sizeof(smb); \ all_fit = 0; \ } \ @@ -66,14 +66,14 @@ int urlEncode(char *dest, const char *src, int maxSrcSize); // Returns < 0 on failure >= 0 on success -time_t parseIso8601Time(const char *str); +int64_t parseIso8601Time(const char *str); uint64_t parseUnsignedInt(const char *str); // base64 encode bytes. The output buffer must have at least // ((4 * (inLen + 1)) / 3) bytes in it. Returns the number of bytes written // to [out]. 
-int base64Encode(const unsigned char *in, int inLen, unsigned char *out); +int base64Encode(const unsigned char *in, int inLen, char *out); // Compute HMAC-SHA-1 with key [key] and message [message], storing result // in [hmac] diff --git a/mswin/libs3.def b/mswin/libs3.def index 031386a..c5bd6d8 100644 --- a/mswin/libs3.def +++ b/mswin/libs3.def @@ -7,9 +7,11 @@ S3_deinitialize S3_delete_bucket S3_delete_object S3_destroy_request_context +S3_generate_authenticated_query_string S3_get_acl S3_get_object S3_get_request_context_fdsets +S3_get_server_access_logging S3_get_status_name S3_head_object S3_initialize @@ -19,6 +21,7 @@ S3_put_object S3_runall_request_context S3_runonce_request_context S3_set_acl +S3_set_server_access_logging S3_status_is_retryable S3_test_bucket S3_validate_bucket_name @@ -70,7 +70,7 @@ static S3Status getAclDataCallback(int bufferSize, const char *buffer, string_buffer_append(gaData->aclXmlDocument, buffer, bufferSize, fit); - return fit ? S3StatusOK : S3StatusAclXmlDocumentTooLarge; + return fit ? 
S3StatusOK : S3StatusXmlDocumentTooLarge; } @@ -122,14 +122,14 @@ void S3_get_acl(const S3BucketContext *bucketContext, const char *key, RequestParams params = { HttpRequestTypeGET, // httpRequestType - bucketContext->protocol, // protocol - bucketContext->uriStyle, // uriStyle - bucketContext->bucketName, // bucketName + { bucketContext->bucketName, // bucketName + bucketContext->protocol, // protocol + bucketContext->uriStyle, // uriStyle + bucketContext->accessKeyId, // accessKeyId + bucketContext->secretAccessKey }, // secretAccessKey key, // key 0, // queryParams - "?acl", // subResource - bucketContext->accessKeyId, // accessKeyId - bucketContext->secretAccessKey, // secretAccessKey + "acl", // subResource 0, // copySourceBucketName 0, // copySourceKey 0, // getConditions @@ -168,7 +168,7 @@ static S3Status generateAclXmlDocument(const char *ownerId, xmlDocumentBufferSize - *xmlDocumentLenReturn - 1, \ fmt, __VA_ARGS__); \ if (*xmlDocumentLenReturn >= xmlDocumentBufferSize) { \ - return S3StatusAclXmlDocumentTooLarge; \ + return S3StatusXmlDocumentTooLarge; \ } \ } while (0) @@ -191,11 +191,24 @@ static S3Status generateAclXmlDocument(const char *ownerId, grant->grantee.canonicalUser.id, grant->grantee.canonicalUser.displayName); break; - default: // case S3GranteeTypeAllAwsUsers/S3GranteeTypeAllUsers: - append("Group\"><URI>http://acs.amazonaws.com/groups/" - "global/%s</URI>", - (grant->granteeType == S3GranteeTypeAllAwsUsers) ? 
- "AuthenticatedUsers" : "AllUsers"); + default: { // case S3GranteeTypeAllAwsUsers/S3GranteeTypeAllUsers: + const char *grantee; + switch (grant->granteeType) { + case S3GranteeTypeAllAwsUsers: + grantee = "http://acs.amazonaws.com/groups/global/" + "AuthenticatedUsers"; + break; + case S3GranteeTypeAllUsers: + grantee = "http://acs.amazonaws.com/groups/global/" + "AllUsers"; + break; + default: + grantee = "http://acs.amazonaws.com/groups/s3/" + "LogDelivery"; + break; + } + append("Group\"><URI>%s</URI>", grantee); + } break; } append("</Grantee><Permission>%s</Permission></Grant>", @@ -278,7 +291,7 @@ void S3_set_acl(const S3BucketContext *bucketContext, const char *key, { if (aclGrantCount > S3_MAX_ACL_GRANT_COUNT) { (*(handler->completeCallback)) - (S3StatusTooManyAclGrants, 0, callbackData); + (S3StatusTooManyGrants, 0, callbackData); return; } @@ -309,14 +322,14 @@ void S3_set_acl(const S3BucketContext *bucketContext, const char *key, RequestParams params = { HttpRequestTypePUT, // httpRequestType - bucketContext->protocol, // protocol - bucketContext->uriStyle, // uriStyle - bucketContext->bucketName, // bucketName + { bucketContext->bucketName, // bucketName + bucketContext->protocol, // protocol + bucketContext->uriStyle, // uriStyle + bucketContext->accessKeyId, // accessKeyId + bucketContext->secretAccessKey }, // secretAccessKey key, // key 0, // queryParams - "?acl", // subResource - bucketContext->accessKeyId, // accessKeyId - bucketContext->secretAccessKey, // secretAccessKey + "acl", // subResource 0, // copySourceBucketName 0, // copySourceKey 0, // getConditions diff --git a/src/bucket.c b/src/bucket.c index a2446df..9dcc48c 100644 --- a/src/bucket.c +++ b/src/bucket.c @@ -131,14 +131,14 @@ void S3_test_bucket(S3Protocol protocol, S3UriStyle uriStyle, RequestParams params = { HttpRequestTypeGET, // httpRequestType - protocol, // protocol - uriStyle, // uriStyle - bucketName, // bucketName + { bucketName, // bucketName + protocol, // protocol + 
uriStyle, // uriStyle + accessKeyId, // accessKeyId + secretAccessKey }, // secretAccessKey 0, // key 0, // queryParams - "?location", // subResource - accessKeyId, // accessKeyId - secretAccessKey, // secretAccessKey + "location", // subResource 0, // copySourceBucketName 0, // copySourceKey 0, // getConditions @@ -267,14 +267,14 @@ void S3_create_bucket(S3Protocol protocol, const char *accessKeyId, RequestParams params = { HttpRequestTypePUT, // httpRequestType - protocol, // protocol - S3UriStylePath, // uriStyle - bucketName, // bucketName + { bucketName, // bucketName + protocol, // protocol + S3UriStylePath, // uriStyle + accessKeyId, // accessKeyId + secretAccessKey }, // secretAccessKey 0, // key 0, // queryParams 0, // subResource - accessKeyId, // accessKeyId - secretAccessKey, // secretAccessKey 0, // copySourceBucketName 0, // copySourceKey 0, // getConditions @@ -349,14 +349,14 @@ void S3_delete_bucket(S3Protocol protocol, S3UriStyle uriStyle, RequestParams params = { HttpRequestTypeDELETE, // httpRequestType - protocol, // protocol - uriStyle, // uriStyle - bucketName, // bucketName + { bucketName, // bucketName + protocol, // protocol + uriStyle, // uriStyle + accessKeyId, // accessKeyId + secretAccessKey }, // secretAccessKey 0, // key 0, // queryParams 0, // subResource - accessKeyId, // accessKeyId - secretAccessKey, // secretAccessKey 0, // copySourceBucketName 0, // copySourceKey 0, // getConditions @@ -532,7 +532,7 @@ static S3Status listBucketXmlCallback(const char *elementPath, lbData->commonPrefixLens[which] - 1, "%.*s", dataLen, data); if (lbData->commonPrefixLens[which] >= - sizeof(lbData->commonPrefixes[which])) { + (int) sizeof(lbData->commonPrefixes[which])) { return S3StatusXmlParseFailure; } } @@ -612,6 +612,8 @@ static void listBucketCompleteCallback(S3Status requestStatus, (*(lbData->responseCompleteCallback)) (requestStatus, s3ErrorDetails, lbData->callbackData); + simplexml_deinitialize(&(lbData->simpleXml)); + free(lbData); } @@ 
-628,11 +630,13 @@ void S3_list_bucket(const S3BucketContext *bucketContext, const char *prefix, #define safe_append(name, value) \ do { \ int fit; \ - string_buffer_append(queryParams, &sep, 1, fit); \ - if (!fit) { \ - (*(handler->responseHandler.completeCallback)) \ - (S3StatusQueryParamsTooLong, 0, callbackData); \ - return; \ + if (amp) { \ + string_buffer_append(queryParams, "&", 1, fit); \ + if (!fit) { \ + (*(handler->responseHandler.completeCallback)) \ + (S3StatusQueryParamsTooLong, 0, callbackData); \ + return; \ + } \ } \ string_buffer_append(queryParams, name "=", \ sizeof(name "=") - 1, fit); \ @@ -641,7 +645,7 @@ void S3_list_bucket(const S3BucketContext *bucketContext, const char *prefix, (S3StatusQueryParamsTooLong, 0, callbackData); \ return; \ } \ - sep = '&'; \ + amp = 1; \ char encoded[3 * 1024]; \ if (!urlEncode(encoded, value, 1024)) { \ (*(handler->responseHandler.completeCallback)) \ @@ -658,7 +662,7 @@ void S3_list_bucket(const S3BucketContext *bucketContext, const char *prefix, } while (0) - char sep = '?'; + int amp = 0; if (prefix) { safe_append("prefix", prefix); } @@ -700,14 +704,14 @@ void S3_list_bucket(const S3BucketContext *bucketContext, const char *prefix, RequestParams params = { HttpRequestTypeGET, // httpRequestType - bucketContext->protocol, // protocol - bucketContext->uriStyle, // uriStyle - bucketContext->bucketName, // bucketName + { bucketContext->bucketName, // bucketName + bucketContext->protocol, // protocol + bucketContext->uriStyle, // uriStyle + bucketContext->accessKeyId, // accessKeyId + bucketContext->secretAccessKey }, // secretAccessKey 0, // key queryParams[0] ? 
queryParams : 0, // queryParams 0, // subResource - bucketContext->accessKeyId, // accessKeyId - bucketContext->secretAccessKey, // secretAccessKey 0, // copySourceBucketName 0, // copySourceKey 0, // getConditions diff --git a/src/general.c b/src/general.c index a9645f0..861c289 100644 --- a/src/general.c +++ b/src/general.c @@ -90,15 +90,17 @@ const char *S3_get_status_name(S3Status status) handlecase(KeyTooLong); handlecase(UriTooLong); handlecase(XmlParseFailure); - handlecase(BadAclEmailAddressTooLong); - handlecase(BadAclUserIdTooLong); - handlecase(BadAclUserDisplayNameTooLong); - handlecase(BadAclGroupUriTooLong); - handlecase(BadAclPermissionTooLong); - handlecase(TooManyAclGrants); - handlecase(BadAclGrantee); - handlecase(BadAclPermission); - handlecase(AclXmlDocumentTooLarge); + handlecase(EmailAddressTooLong); + handlecase(UserIdTooLong); + handlecase(UserDisplayNameTooLong); + handlecase(GroupUriTooLong); + handlecase(PermissionTooLong); + handlecase(TargetBucketTooLong); + handlecase(TargetPrefixTooLong); + handlecase(TooManyGrants); + handlecase(BadGrantee); + handlecase(BadPermission); + handlecase(XmlDocumentTooLarge); handlecase(NameLookupError); handlecase(FailedToConnect); handlecase(ServerFailedVerification); @@ -283,7 +285,7 @@ static S3Status convertAclXmlCallback(const char *elementPath, S3_MAX_GRANTEE_USER_ID_SIZE - caData->ownerIdLen - 1, "%.*s", dataLen, data); if (caData->ownerIdLen >= S3_MAX_GRANTEE_USER_ID_SIZE) { - return S3StatusBadAclUserIdTooLong; + return S3StatusUserIdTooLong; } } else if (!strcmp(elementPath, "AccessControlPolicy/Owner/" @@ -296,7 +298,7 @@ static S3Status convertAclXmlCallback(const char *elementPath, "%.*s", dataLen, data); if (caData->ownerDisplayNameLen >= S3_MAX_GRANTEE_DISPLAY_NAME_SIZE) { - return S3StatusBadAclUserDisplayNameTooLong; + return S3StatusUserDisplayNameTooLong; } } else if (!strcmp(elementPath, @@ -305,7 +307,7 @@ static S3Status convertAclXmlCallback(const char *elementPath, // 
AmazonCustomerByEmail string_buffer_append(caData->emailAddress, data, dataLen, fit); if (!fit) { - return S3StatusBadAclEmailAddressTooLong; + return S3StatusEmailAddressTooLong; } } else if (!strcmp(elementPath, @@ -314,7 +316,7 @@ static S3Status convertAclXmlCallback(const char *elementPath, // CanonicalUser string_buffer_append(caData->userId, data, dataLen, fit); if (!fit) { - return S3StatusBadAclUserIdTooLong; + return S3StatusUserIdTooLong; } } else if (!strcmp(elementPath, @@ -323,7 +325,7 @@ static S3Status convertAclXmlCallback(const char *elementPath, // CanonicalUser string_buffer_append(caData->userDisplayName, data, dataLen, fit); if (!fit) { - return S3StatusBadAclUserDisplayNameTooLong; + return S3StatusUserDisplayNameTooLong; } } else if (!strcmp(elementPath, @@ -332,7 +334,7 @@ static S3Status convertAclXmlCallback(const char *elementPath, // Group string_buffer_append(caData->groupUri, data, dataLen, fit); if (!fit) { - return S3StatusBadAclGroupUriTooLong; + return S3StatusGroupUriTooLong; } } else if (!strcmp(elementPath, @@ -341,7 +343,7 @@ static S3Status convertAclXmlCallback(const char *elementPath, // Permission string_buffer_append(caData->permission, data, dataLen, fit); if (!fit) { - return S3StatusBadAclPermissionTooLong; + return S3StatusPermissionTooLong; } } } @@ -351,7 +353,7 @@ static S3Status convertAclXmlCallback(const char *elementPath, // A grant has just been completed; so add the next S3AclGrant // based on the values read if (*(caData->aclGrantCountReturn) == S3_MAX_ACL_GRANT_COUNT) { - return S3StatusTooManyAclGrants; + return S3StatusTooManyGrants; } S3AclGrant *grant = &(caData->aclGrants @@ -379,12 +381,17 @@ static S3Status convertAclXmlCallback(const char *elementPath, "AllUsers")) { grant->granteeType = S3GranteeTypeAllUsers; } + else if (!strcmp(caData->groupUri, + "http://acs.amazonaws.com/groups/s3/" + "LogDelivery")) { + grant->granteeType = S3GranteeTypeLogDelivery; + } else { - return S3StatusBadAclGrantee; + 
return S3StatusBadGrantee; } } else { - return S3StatusBadAclGrantee; + return S3StatusBadGrantee; } if (!strcmp(caData->permission, "READ")) { @@ -403,7 +410,7 @@ static S3Status convertAclXmlCallback(const char *elementPath, grant->permission = S3PermissionFullControl; } else { - return S3StatusBadAclPermission; + return S3StatusBadPermission; } (*(caData->aclGrantCountReturn))++; @@ -466,4 +473,3 @@ int S3_status_is_retryable(S3Status status) return 0; } } - diff --git a/src/mingw_functions.c b/src/mingw_functions.c index 5ba2211..0e2b7b2 100644 --- a/src/mingw_functions.c +++ b/src/mingw_functions.c @@ -35,6 +35,8 @@ unsigned long pthread_self() int pthread_mutex_init(pthread_mutex_t *mutex, void *v) { + (void) v; + InitializeCriticalSection(&(mutex->criticalSection)); return 0; diff --git a/src/mingw_s3_functions.c b/src/mingw_s3_functions.c index 478083f..142569d 100644 --- a/src/mingw_s3_functions.c +++ b/src/mingw_s3_functions.c @@ -26,6 +26,8 @@ int setenv(const char *a, const char *b, int c) { + (void) c; + return SetEnvironmentVariable(a, b); } diff --git a/src/object.c b/src/object.c index dd77c61..4c8fd1c 100644 --- a/src/object.c +++ b/src/object.c @@ -42,14 +42,14 @@ void S3_put_object(const S3BucketContext *bucketContext, const char *key, RequestParams params = { HttpRequestTypePUT, // httpRequestType - bucketContext->protocol, // protocol - bucketContext->uriStyle, // uriStyle - bucketContext->bucketName, // bucketName + { bucketContext->bucketName, // bucketName + bucketContext->protocol, // protocol + bucketContext->uriStyle, // uriStyle + bucketContext->accessKeyId, // accessKeyId + bucketContext->secretAccessKey }, // secretAccessKey key, // key 0, // queryParams 0, // subResource - bucketContext->accessKeyId, // accessKeyId - bucketContext->secretAccessKey, // secretAccessKey 0, // copySourceBucketName 0, // copySourceKey 0, // getConditions @@ -80,7 +80,7 @@ typedef struct CopyObjectData S3ResponseCompleteCallback *responseCompleteCallback; 
void *callbackData; - time_t *lastModifiedReturn; + int64_t *lastModifiedReturn; int eTagReturnSize; char *eTagReturn; int eTagReturnLen; @@ -165,7 +165,7 @@ static void copyObjectCompleteCallback(S3Status requestStatus, void S3_copy_object(const S3BucketContext *bucketContext, const char *key, const char *destinationBucket, const char *destinationKey, const S3PutProperties *putProperties, - time_t *lastModifiedReturn, int eTagReturnSize, + int64_t *lastModifiedReturn, int eTagReturnSize, char *eTagReturn, S3RequestContext *requestContext, const S3ResponseHandler *handler, void *callbackData) { @@ -196,15 +196,15 @@ void S3_copy_object(const S3BucketContext *bucketContext, const char *key, RequestParams params = { HttpRequestTypeCOPY, // httpRequestType - bucketContext->protocol, // protocol - bucketContext->uriStyle, // uriStyle - destinationBucket ? destinationBucket : - bucketContext->bucketName, // bucketName + { destinationBucket ? destinationBucket : + bucketContext->bucketName, // bucketName + bucketContext->protocol, // protocol + bucketContext->uriStyle, // uriStyle + bucketContext->accessKeyId, // accessKeyId + bucketContext->secretAccessKey }, // secretAccessKey destinationKey ? 
destinationKey : key, // key 0, // queryParams 0, // subResource - bucketContext->accessKeyId, // accessKeyId - bucketContext->secretAccessKey, // secretAccessKey bucketContext->bucketName, // copySourceBucketName key, // copySourceKey 0, // getConditions @@ -236,14 +236,14 @@ void S3_get_object(const S3BucketContext *bucketContext, const char *key, RequestParams params = { HttpRequestTypeGET, // httpRequestType - bucketContext->protocol, // protocol - bucketContext->uriStyle, // uriStyle - bucketContext->bucketName, // bucketName + { bucketContext->bucketName, // bucketName + bucketContext->protocol, // protocol + bucketContext->uriStyle, // uriStyle + bucketContext->accessKeyId, // accessKeyId + bucketContext->secretAccessKey }, // secretAccessKey key, // key 0, // queryParams 0, // subResource - bucketContext->accessKeyId, // accessKeyId - bucketContext->secretAccessKey, // secretAccessKey 0, // copySourceBucketName 0, // copySourceKey getConditions, // getConditions @@ -273,14 +273,14 @@ void S3_head_object(const S3BucketContext *bucketContext, const char *key, RequestParams params = { HttpRequestTypeHEAD, // httpRequestType - bucketContext->protocol, // protocol - bucketContext->uriStyle, // uriStyle - bucketContext->bucketName, // bucketName + { bucketContext->bucketName, // bucketName + bucketContext->protocol, // protocol + bucketContext->uriStyle, // uriStyle + bucketContext->accessKeyId, // accessKeyId + bucketContext->secretAccessKey }, // secretAccessKey key, // key 0, // queryParams 0, // subResource - bucketContext->accessKeyId, // accessKeyId - bucketContext->secretAccessKey, // secretAccessKey 0, // copySourceBucketName 0, // copySourceKey 0, // getConditions @@ -310,14 +310,14 @@ void S3_delete_object(const S3BucketContext *bucketContext, const char *key, RequestParams params = { HttpRequestTypeDELETE, // httpRequestType - bucketContext->protocol, // protocol - bucketContext->uriStyle, // uriStyle - bucketContext->bucketName, // bucketName + { 
bucketContext->bucketName, // bucketName + bucketContext->protocol, // protocol + bucketContext->uriStyle, // uriStyle + bucketContext->accessKeyId, // accessKeyId + bucketContext->secretAccessKey }, // secretAccessKey key, // key 0, // queryParams 0, // subResource - bucketContext->accessKeyId, // accessKeyId - bucketContext->secretAccessKey, // secretAccessKey 0, // copySourceBucketName 0, // copySourceKey 0, // getConditions diff --git a/src/request.c b/src/request.c index d5b455e..1b9e386 100644 --- a/src/request.c +++ b/src/request.c @@ -172,19 +172,31 @@ static size_t curl_read_func(void *ptr, size_t size, size_t nmemb, void *data) return CURL_READFUNC_ABORT; } - if (request->toS3Callback) { - int ret = (*(request->toS3Callback)) - (len, (char *) ptr, request->callbackData); - if (ret < 0) { - request->status = S3StatusAbortedByCallback; - return CURL_READFUNC_ABORT; - } - else { - return ret; - } + // If there is no data callback, or the data callback has already returned + // contentLength bytes, return 0; + if (!request->toS3Callback || !request->toS3CallbackBytesRemaining) { + return 0; + } + + // Don't tell the callback that we are willing to accept more data than we + // really are + if (len > request->toS3CallbackBytesRemaining) { + len = request->toS3CallbackBytesRemaining; + } + + // Otherwise, make the data callback + int ret = (*(request->toS3Callback)) + (len, (char *) ptr, request->callbackData); + if (ret < 0) { + request->status = S3StatusAbortedByCallback; + return CURL_READFUNC_ABORT; } else { - return 0; + if (ret > request->toS3CallbackBytesRemaining) { + ret = request->toS3CallbackBytesRemaining; + } + request->toS3CallbackBytesRemaining -= ret; + return ret; } } @@ -253,7 +265,7 @@ static S3Status compose_amz_headers(const RequestParams *params, len += snprintf(&(values->amzHeadersRaw[len]), \ sizeof(values->amzHeadersRaw) - len, \ format, __VA_ARGS__); \ - if (len >= sizeof(values->amzHeadersRaw)) { \ + if (len >= (int) 
sizeof(values->amzHeadersRaw)) { \ return S3StatusMetaDataHeadersTooLong; \ } \ while ((len > 0) && (values->amzHeadersRaw[len - 1] == ' ')) { \ @@ -266,7 +278,7 @@ static S3Status compose_amz_headers(const RequestParams *params, do { \ values->amzHeaders[values->amzHeadersCount++] = \ &(values->amzHeadersRaw[len]); \ - if ((len + l) >= sizeof(values->amzHeadersRaw)) { \ + if ((len + l) >= (int) sizeof(values->amzHeadersRaw)) { \ return S3StatusMetaDataHeadersTooLong; \ } \ int todo = l; \ @@ -362,7 +374,7 @@ static S3Status compose_standard_headers(const RequestParams *params, /* Compose header, make sure it all fit */ \ int len = snprintf(values-> destField, \ sizeof(values-> destField), fmt, val); \ - if (len >= sizeof(values-> destField)) { \ + if (len >= (int) sizeof(values-> destField)) { \ return tooLongError; \ } \ /* Now remove the whitespace at the end */ \ @@ -392,7 +404,7 @@ static S3Status compose_standard_headers(const RequestParams *params, /* Compose header, make sure it all fit */ \ int len = snprintf(values-> destField, \ sizeof(values-> destField), fmt, val); \ - if (len >= sizeof(values-> destField)) { \ + if (len >= (int) sizeof(values-> destField)) { \ return tooLongError; \ } \ /* Now remove the whitespace at the end */ \ @@ -431,9 +443,9 @@ static S3Status compose_standard_headers(const RequestParams *params, // Expires if (params->putProperties && (params->putProperties->expires >= 0)) { + time_t t = (time_t) params->putProperties->expires; strftime(values->expiresHeader, sizeof(values->expiresHeader), - "Expires: %a, %d %b %Y %H:%M:%S UTC", - gmtime(&(params->putProperties->expires))); + "Expires: %a, %d %b %Y %H:%M:%S UTC", gmtime(&t)); } else { values->expiresHeader[0] = 0; @@ -442,10 +454,10 @@ static S3Status compose_standard_headers(const RequestParams *params, // If-Modified-Since if (params->getConditions && (params->getConditions->ifModifiedSince >= 0)) { + time_t t = (time_t) params->getConditions->ifModifiedSince; 
strftime(values->ifModifiedSinceHeader, sizeof(values->ifModifiedSinceHeader), - "If-Modified-Since: %a, %d %b %Y %H:%M:%S UTC", - gmtime(&(params->getConditions->ifModifiedSince))); + "If-Modified-Since: %a, %d %b %Y %H:%M:%S UTC", gmtime(&t)); } else { values->ifModifiedSinceHeader[0] = 0; @@ -454,10 +466,10 @@ static S3Status compose_standard_headers(const RequestParams *params, // If-Unmodified-Since header if (params->getConditions && (params->getConditions->ifNotModifiedSince >= 0)) { + time_t t = (time_t) params->getConditions->ifNotModifiedSince; strftime(values->ifUnmodifiedSinceHeader, sizeof(values->ifUnmodifiedSinceHeader), - "If-Unmodified-Since: %a, %d %b %Y %H:%M:%S UTC", - gmtime(&(params->getConditions->ifNotModifiedSince))); + "If-Unmodified-Since: %a, %d %b %Y %H:%M:%S UTC", gmtime(&t)); } else { values->ifUnmodifiedSinceHeader[0] = 0; @@ -622,29 +634,31 @@ static void canonicalize_amz_headers(RequestComputedValues *values) // Canonicalizes the resource into params->canonicalizedResource -static void canonicalize_resource(const RequestParams *params, - RequestComputedValues *values) +static void canonicalize_resource(const char *bucketName, + const char *subResource, + const char *urlEncodedKey, + char *buffer) { - char *buffer = values->canonicalizedResource; int len = 0; *buffer = 0; #define append(str) len += sprintf(&(buffer[len]), "%s", str) - if (params->bucketName && params->bucketName[0]) { + if (bucketName && bucketName[0]) { buffer[len++] = '/'; - append(params->bucketName); + append(bucketName); } append("/"); - if (values->urlEncodedKey[0]) { - append(values->urlEncodedKey); + if (urlEncodedKey && urlEncodedKey[0]) { + append(urlEncodedKey); } - if (params->subResource && params->subResource[0]) { - append(params->subResource); + if (subResource && subResource[0]) { + append("?"); + append(subResource); } } @@ -672,9 +686,9 @@ static S3Status compose_auth_header(const RequestParams *params, { // We allow for: // 17 bytes for HTTP-Verb 
+ \n - // 129 bytes for MD5 + \n + // 129 bytes for Content-MD5 + \n // 129 bytes for Content-Type + \n - // 1 byte for Data + \n + // 1 byte for empty Date + \n // CanonicalizedAmzHeaders & CanonicalizedResource char signbuf[17 + 129 + 129 + 1 + (sizeof(values->canonicalizedAmzHeaders) - 1) + @@ -706,45 +720,48 @@ static S3Status compose_auth_header(const RequestParams *params, // Generate an HMAC-SHA-1 of the signbuf unsigned char hmac[20]; - HMAC_SHA1(hmac, (unsigned char *) params->secretAccessKey, - strlen(params->secretAccessKey), + HMAC_SHA1(hmac, (unsigned char *) params->bucketContext.secretAccessKey, + strlen(params->bucketContext.secretAccessKey), (unsigned char *) signbuf, len); // Now base-64 encode the results - unsigned char b64[((20 + 1) * 4) / 3]; + char b64[((20 + 1) * 4) / 3]; int b64Len = base64Encode(hmac, 20, b64); snprintf(values->authorizationHeader, sizeof(values->authorizationHeader), - "Authorization: AWS %s:%.*s", params->accessKeyId, b64Len, b64); + "Authorization: AWS %s:%.*s", params->bucketContext.accessKeyId, + b64Len, b64); return S3StatusOK; } // Compose the URI to use for the request given the request parameters -static S3Status compose_uri(Request *request, const RequestParams *params, - const RequestComputedValues *values) +static S3Status compose_uri(char *buffer, int bufferSize, + const S3BucketContext *bucketContext, + const char *urlEncodedKey, + const char *subResource, const char *queryParams) { int len = 0; - -#define uri_append(fmt, ...) \ - do { \ - len += snprintf(&(request->uri[len]), \ - sizeof(request->uri) - len, \ - fmt, __VA_ARGS__); \ - if (len >= sizeof(request->uri)) { \ - return S3StatusUriTooLong; \ - } \ + +#define uri_append(fmt, ...) \ + do { \ + len += snprintf(&(buffer[len]), bufferSize - len, fmt, __VA_ARGS__); \ + if (len >= bufferSize) { \ + return S3StatusUriTooLong; \ + } \ } while (0) - uri_append("http%s://", (params->protocol == S3ProtocolHTTP) ? 
"" : "s"); + uri_append("http%s://", + (bucketContext->protocol == S3ProtocolHTTP) ? "" : "s"); - if (params->bucketName && params->bucketName[0]) { - if (params->uriStyle == S3UriStyleVirtualHost) { - uri_append("%s.s3.amazonaws.com", params->bucketName); + if (bucketContext->bucketName && + bucketContext->bucketName[0]) { + if (bucketContext->uriStyle == S3UriStyleVirtualHost) { + uri_append("%s.s3.amazonaws.com", bucketContext->bucketName); } else { - uri_append("s3.amazonaws.com/%s", params->bucketName); + uri_append("s3.amazonaws.com/%s", bucketContext->bucketName); } } else { @@ -753,18 +770,17 @@ static S3Status compose_uri(Request *request, const RequestParams *params, uri_append("%s", "/"); - if (params->key && params->key[0]) { - uri_append("%s", values->urlEncodedKey); - } - - if (params->subResource && params->subResource[0]) { - uri_append("%s", params->subResource); + uri_append("%s", urlEncodedKey); + + if (subResource && subResource[0]) { + uri_append("?%s", subResource); } - if (params->queryParams) { - uri_append("%s", params->queryParams); + if (queryParams) { + uri_append("%s%s", (subResource && subResource[0]) ? "&" : "?", + queryParams); } - + return S3StatusOK; } @@ -896,14 +912,14 @@ static S3Status setup_curl(Request *request, // Set request type. 
switch (params->httpRequestType) { case HttpRequestTypeHEAD: - curl_easy_setopt_safe(CURLOPT_NOBODY, 1); + curl_easy_setopt_safe(CURLOPT_NOBODY, 1); break; case HttpRequestTypePUT: case HttpRequestTypeCOPY: curl_easy_setopt_safe(CURLOPT_UPLOAD, 1); break; case HttpRequestTypeDELETE: - curl_easy_setopt_safe(CURLOPT_CUSTOMREQUEST, "DELETE"); + curl_easy_setopt_safe(CURLOPT_CUSTOMREQUEST, "DELETE"); break; default: // HttpRequestTypeGET break; @@ -961,7 +977,8 @@ static S3Status request_get(const RequestParams *params, } // Initialize the request - request->prev = request->next = 0; + request->prev = 0; + request->next = 0; // Request status is initialized to no error, will be updated whenever // an error occurs @@ -973,7 +990,10 @@ static S3Status request_get(const RequestParams *params, request->headers = 0; // Compute the URL - if ((status = compose_uri(request, params, values)) != S3StatusOK) { + if ((status = compose_uri + (request->uri, sizeof(request->uri), + &(params->bucketContext), values->urlEncodedKey, + params->subResource, params->queryParams)) != S3StatusOK) { curl_easy_cleanup(request->curl); free(request); return status; @@ -990,6 +1010,8 @@ static S3Status request_get(const RequestParams *params, request->toS3Callback = params->toS3Callback; + request->toS3CallbackBytesRemaining = params->toS3CallbackTotalSize; + request->fromS3Callback = params->fromS3Callback; request->completeCallback = params->completeCallback; @@ -1095,9 +1117,10 @@ void request_perform(const RequestParams *params, S3RequestContext *context) RequestComputedValues computed; // Validate the bucket name - if (params->bucketName && + if (params->bucketContext.bucketName && ((status = S3_validate_bucket_name - (params->bucketName, params->uriStyle)) != S3StatusOK)) { + (params->bucketContext.bucketName, + params->bucketContext.uriStyle)) != S3StatusOK)) { return_status(status); } @@ -1121,7 +1144,9 @@ void request_perform(const RequestParams *params, S3RequestContext *context) 
canonicalize_amz_headers(&computed); // Compute the canonicalized resource - canonicalize_resource(params, &computed); + canonicalize_resource(params->bucketContext.bucketName, + params->subResource, computed.urlEncodedKey, + computed.canonicalizedResource); // Compose Authorization header if ((status = compose_auth_header(params, &computed)) != S3StatusOK) { @@ -1140,8 +1165,8 @@ void request_perform(const RequestParams *params, S3RequestContext *context) if (context->requests) { request->prev = context->requests->prev; request->next = context->requests; - context->requests->prev->next = - context->requests->prev = request; + context->requests->prev->next = request; + context->requests->prev = request; } else { context->requests = request->next = request->prev = request; } @@ -1186,6 +1211,11 @@ void request_finish(Request *request) ((request->httpResponseCode < 200) || (request->httpResponseCode > 299))) { switch (request->httpResponseCode) { + case 0: + // This happens if the request never got any HTTP response + // headers at all, we call this a ConnectionFailed error + request->status = S3StatusConnectionFailed; + break; case 100: // Some versions of libcurl erroneously set HTTP // status to this break; @@ -1264,3 +1294,84 @@ S3Status request_curl_code_to_status(CURLcode code) return S3StatusInternalError; } } + + +S3Status S3_generate_authenticated_query_string + (char *buffer, const S3BucketContext *bucketContext, + const char *key, int64_t expires, const char *resource) +{ +#define MAX_EXPIRES (((int64_t) 1 << 31) - 1) + // S3 seems to only accept expiration dates up to the number of seconds + // representable by a signed 32-bit integer + if (expires < 0) { + expires = MAX_EXPIRES; + } + else if (expires > MAX_EXPIRES) { + expires = MAX_EXPIRES; + } + + // xxx todo: rework this so that it can be incorporated into shared code + // with request_perform(). It's really unfortunate that this code is not + // shared with request_perform(). 
+ + // URL encode the key + char urlEncodedKey[S3_MAX_KEY_SIZE * 3]; + if (key) { + urlEncode(urlEncodedKey, key, strlen(key)); + } + else { + urlEncodedKey[0] = 0; + } + + // Compute canonicalized resource + char canonicalizedResource[MAX_CANONICALIZED_RESOURCE_SIZE]; + canonicalize_resource(bucketContext->bucketName, resource, urlEncodedKey, + canonicalizedResource); + + // We allow for: + // 17 bytes for HTTP-Verb + \n + // 1 byte for empty Content-MD5 + \n + // 1 byte for empty Content-Type + \n + // 20 bytes for Expires + \n + // 0 bytes for CanonicalizedAmzHeaders + // CanonicalizedResource + char signbuf[17 + 1 + 1 + 1 + 20 + sizeof(canonicalizedResource) + 1]; + int len = 0; + +#define signbuf_append(format, ...) \ + len += snprintf(&(signbuf[len]), sizeof(signbuf) - len, \ + format, __VA_ARGS__) + + signbuf_append("%s\n", "GET"); // HTTP-Verb + signbuf_append("%s\n", ""); // Content-MD5 + signbuf_append("%s\n", ""); // Content-Type + signbuf_append("%llu\n", (unsigned long long) expires); + signbuf_append("%s", canonicalizedResource); + + // Generate an HMAC-SHA-1 of the signbuf + unsigned char hmac[20]; + + HMAC_SHA1(hmac, (unsigned char *) bucketContext->secretAccessKey, + strlen(bucketContext->secretAccessKey), + (unsigned char *) signbuf, len); + + // Now base-64 encode the results + char b64[((20 + 1) * 4) / 3]; + int b64Len = base64Encode(hmac, 20, b64); + + // Now urlEncode that + char signature[sizeof(b64) * 3]; + urlEncode(signature, b64, b64Len); + + // Finally, compose the uri, with params: + // ?AWSAccessKeyId=xxx[&Expires=]&Signature=xxx + char queryParams[sizeof("AWSAccessKeyId=") + 20 + + sizeof("&Expires=") + 20 + + sizeof("&Signature=") + sizeof(signature) + 1]; + + sprintf(queryParams, "AWSAccessKeyId=%s&Expires=%ld&Signature=%s", + bucketContext->accessKeyId, (long) expires, signature); + + return compose_uri(buffer, S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE, + bucketContext, urlEncodedKey, resource, queryParams); +} diff --git 
a/src/request_context.c b/src/request_context.c index 567f51a..bccaee8 100644 --- a/src/request_context.c +++ b/src/request_context.c @@ -75,13 +75,25 @@ S3Status S3_runall_request_context(S3RequestContext *requestContext) int requestsRemaining; do { fd_set readfds, writefds, exceptfds; + FD_ZERO(&readfds); + FD_ZERO(&writefds); + FD_ZERO(&exceptfds); int maxfd; S3Status status = S3_get_request_context_fdsets (requestContext, &readfds, &writefds, &exceptfds, &maxfd); if (status != S3StatusOK) { return status; } - select(maxfd + 1, &readfds, &writefds, &exceptfds, 0); + // curl will return -1 if it hasn't even created any fds yet because + // none of the connections have started yet. In this case, don't + // do the select at all, because it will wait forever; instead, just + // skip it and go straight to running the underlying CURL handles + if (maxfd != -1) { + int64_t timeout = S3_get_request_context_timeout(requestContext); + struct timeval tv = { timeout / 1000, (timeout % 1000) * 1000 }; + select(maxfd + 1, &readfds, &writefds, &exceptfds, + (timeout == -1) ? 
0 : &tv); + } status = S3_runonce_request_context(requestContext, &requestsRemaining); if (status != S3StatusOK) { @@ -115,14 +127,12 @@ S3Status S3_runonce_request_context(S3RequestContext *requestContext, CURLMsg *msg; int junk; while ((msg = curl_multi_info_read(requestContext->curlm, &junk))) { - if ((msg->msg != CURLMSG_DONE) || - (curl_multi_remove_handle(requestContext->curlm, - msg->easy_handle) != CURLM_OK)) { + if (msg->msg != CURLMSG_DONE) { return S3StatusInternalError; } Request *request; - if (curl_easy_getinfo(msg->easy_handle, CURLOPT_PRIVATE, - (char *) &request) != CURLE_OK) { + if (curl_easy_getinfo(msg->easy_handle, CURLINFO_PRIVATE, + (char **) &request) != CURLE_OK) { return S3StatusInternalError; } // Remove the request from the list of requests @@ -143,9 +153,16 @@ S3Status S3_runonce_request_context(S3RequestContext *requestContext, request->status = request_curl_code_to_status (msg->data.result); } + if (curl_multi_remove_handle(requestContext->curlm, + msg->easy_handle) != CURLM_OK) { + return S3StatusInternalError; + } // Finish the request, ensuring that all callbacks have been made, // and also releases the request request_finish(request); + // Now, since a callback was made, there may be new requests + // queued up to be performed immediately, so do so + status = CURLM_CALL_MULTI_PERFORM; } } while (status == CURLM_CALL_MULTI_PERFORM); @@ -160,3 +177,14 @@ S3Status S3_get_request_context_fdsets(S3RequestContext *requestContext, exceptFdSet, maxFd) == CURLM_OK) ? 
S3StatusOK : S3StatusInternalError); } + +int64_t S3_get_request_context_timeout(S3RequestContext *requestContext) +{ + long timeout; + + if (curl_multi_timeout(requestContext->curlm, &timeout) != CURLM_OK) { + timeout = 0; + } + + return timeout; +} diff --git a/src/response_headers_handler.c b/src/response_headers_handler.c index cbaa4bf..14e14a0 100644 --- a/src/response_headers_handler.c +++ b/src/response_headers_handler.c @@ -195,10 +195,10 @@ void response_headers_handler_done(ResponseHeadersHandler *handler, CURL *curl) { // Now get the last modification time from curl, since it's easiest to let // curl parse it + time_t lastModified; if (curl_easy_getinfo - (curl, CURLINFO_FILETIME, - &(handler->responseProperties.lastModified)) != CURLE_OK) { - handler->responseProperties.lastModified = -1; + (curl, CURLINFO_FILETIME, &lastModified) == CURLE_OK) { + handler->responseProperties.lastModified = lastModified; } handler->done = 1; @@ -126,6 +126,12 @@ static char errorDetailsG[4096] = { 0 }; #define ALL_DETAILS_PREFIX_LEN (sizeof(ALL_DETAILS_PREFIX) - 1) #define NO_STATUS_PREFIX "noStatus=" #define NO_STATUS_PREFIX_LEN (sizeof(NO_STATUS_PREFIX) - 1) +#define RESOURCE_PREFIX "resource=" +#define RESOURCE_PREFIX_LEN (sizeof(RESOURCE_PREFIX) - 1) +#define TARGET_BUCKET_PREFIX "targetBucket=" +#define TARGET_BUCKET_PREFIX_LEN (sizeof(TARGET_BUCKET_PREFIX) - 1) +#define TARGET_PREFIX_PREFIX "targetPrefix=" +#define TARGET_PREFIX_PREFIX_LEN (sizeof(TARGET_PREFIX_PREFIX) - 1) // util ---------------------------------------------------------------------- @@ -211,6 +217,17 @@ static void usageExit(FILE *out) " <bucket>[/<key>] : Bucket or bucket/key to set the ACL of\n" " [filename] : Input filename for ACL (default is stdin)\n" "\n" +" getlogging : Get the logging status of a bucket\n" +" <bucket> : Bucket to get the logging status of\n" +" [filename] : Output filename for logging status (default is stdout)\n" +"\n" +" setlogging : Set the logging status of a bucket\n" +" 
<bucket> : Bucket to set the logging status of\n" +" [targetBucket] : Target bucket to log to; if not present, disables\n" +" logging\n" +" [targetPrefix] : Key prefix to use for logs\n" +" [filename] : Input filename for logging status (default is stdin)\n" +"\n" " put : Puts an object\n" " <bucket>/<key> : Bucket/key to put object to\n" " [filename] : Filename to read source data from " @@ -266,7 +283,13 @@ " [byteCount] : Number of bytes of byte range to return\n" "\n" " head : Gets only the headers of an object, implies -s\n" -" <buckey>/<key> : Bucket/key of object to get headers of\n" +" <bucket>/<key> : Bucket/key of object to get headers of\n" +"\n" +" gqs : Generates an authenticated query string\n" +" <bucket>[/<key>] : Bucket or bucket/key to generate query string for\n" +" [expires] : Expiration date for query string\n" +" [resource] : Sub-resource of key for query string, without a\n" +" leading '?', for example, \"torrent\"\n" "\n" " Canned ACLs:\n" "\n" @@ -437,7 +460,7 @@ static int checkString(const char *str, const char *format) } -static time_t parseIso8601Time(const char *str) +static int64_t parseIso8601Time(const char *str) { // Check to make sure that it has a valid format if (!checkString(str, "dddd-dd-ddTdd:dd:dd")) { @@ -476,7 +499,7 @@ char *tz = getenv("TZ"); setenv("TZ", "UTC", 1); - time_t ret = mktime(&stm); + int64_t ret = mktime(&stm); if (tz) { setenv("TZ", tz, 1); @@ -546,7 +569,7 @@ static int convert_simple_acl(char *aclXml, char *ownerId, } while (0) #define COPY_STRING(field) \ - COPY_STRING_MAXLEN(field, (sizeof(field) - 1)) + COPY_STRING_MAXLEN(field, (int) (sizeof(field) - 1)) while (1) { SKIP_SPACE(0); @@ -604,6 +627,11 @@ grant->granteeType = S3GranteeTypeAllUsers; aclXml += (sizeof("All Users") - 1); } + else if (!strncmp(aclXml, "Log Delivery", + sizeof("Log Delivery") - 1)) { + 
grant->granteeType = S3GranteeTypeLogDelivery; + aclXml += (sizeof("Log Delivery") - 1); + } else { return 0; } @@ -674,6 +702,8 @@ static struct option longOptionsG[] = static S3Status responsePropertiesCallback (const S3ResponseProperties *properties, void *callbackData) { + (void) callbackData; + if (!showResponsePropertiesG) { return S3StatusOK; } @@ -696,9 +726,9 @@ static S3Status responsePropertiesCallback print_nonnull("ETag", eTag); if (properties->lastModified > 0) { char timebuf[256]; + time_t t = (time_t) properties->lastModified; // gmtime is not thread-safe but we don't care here. - strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ", - gmtime(&(properties->lastModified))); + strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ", gmtime(&t)); printf("Last-Modified: %s\n", timebuf); } int i; @@ -719,6 +749,8 @@ static void responseCompleteCallback(S3Status status, const S3ErrorDetails *error, void *callbackData) { + (void) callbackData; + statusG = status; // Compose the error details message now, although we might not use it. 
// Can't just save a pointer to [error] since it's not guaranteed to last @@ -782,7 +814,7 @@ static void printListServiceHeader(int allDetails) static S3Status listServiceCallback(const char *ownerId, const char *ownerDisplayName, const char *bucketName, - time_t creationDate, void *callbackData) + int64_t creationDate, void *callbackData) { list_service_data *data = (list_service_data *) callbackData; @@ -793,8 +825,8 @@ static S3Status listServiceCallback(const char *ownerId, char timebuf[256]; if (creationDate >= 0) { - strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ", - gmtime(&creationDate)); + time_t t = (time_t) creationDate; + strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ", gmtime(&t)); } else { timebuf[0] = 0; @@ -846,18 +878,18 @@ static void list_service(int allDetails) // test bucket --------------------------------------------------------------- -static void test_bucket(int argc, char **argv, int optind) +static void test_bucket(int argc, char **argv, int optindex) { // test bucket - if (optind == argc) { + if (optindex == argc) { fprintf(stderr, "\nERROR: Missing parameter: bucket\n"); usageExit(stderr); } - const char *bucketName = argv[optind++]; + const char *bucketName = argv[optindex++]; - if (optind != argc) { - fprintf(stderr, "\nERROR: Extraneous parameter: %s\n", argv[optind]); + if (optindex != argc) { + fprintf(stderr, "\nERROR: Extraneous parameter: %s\n", argv[optindex]); usageExit(stderr); } @@ -910,14 +942,14 @@ static void test_bucket(int argc, char **argv, int optind) // create bucket ------------------------------------------------------------- -static void create_bucket(int argc, char **argv, int optind) +static void create_bucket(int argc, char **argv, int optindex) { - if (optind == argc) { + if (optindex == argc) { fprintf(stderr, "\nERROR: Missing parameter: bucket\n"); usageExit(stderr); } - const char *bucketName = argv[optind++]; + const char *bucketName = argv[optindex++]; if (!forceG && 
(S3_validate_bucket_name (bucketName, S3UriStyleVirtualHost) != S3StatusOK)) { @@ -931,8 +963,8 @@ static void create_bucket(int argc, char **argv, int optind) const char *locationConstraint = 0; S3CannedAcl cannedAcl = S3CannedAclPrivate; - while (optind < argc) { - char *param = argv[optind++]; + while (optindex < argc) { + char *param = argv[optindex++]; if (!strncmp(param, LOCATION_PREFIX, LOCATION_PREFIX_LEN)) { locationConstraint = &(param[LOCATION_PREFIX_LEN]); } @@ -987,17 +1019,17 @@ static void create_bucket(int argc, char **argv, int optind) // delete bucket ------------------------------------------------------------- -static void delete_bucket(int argc, char **argv, int optind) +static void delete_bucket(int argc, char **argv, int optindex) { - if (optind == argc) { + if (optindex == argc) { fprintf(stderr, "\nERROR: Missing parameter: bucket\n"); usageExit(stderr); } - const char *bucketName = argv[optind++]; + const char *bucketName = argv[optindex++]; - if (optind != argc) { - fprintf(stderr, "\nERROR: Extraneous parameter: %s\n", argv[optind]); + if (optindex != argc) { + fprintf(stderr, "\nERROR: Extraneous parameter: %s\n", argv[optindex]); usageExit(stderr); } @@ -1090,8 +1122,9 @@ static S3Status listBucketCallback(int isTruncated, const char *nextMarker, const S3ListBucketContent *content = &(contents[i]); char timebuf[256]; if (0) { + time_t t = (time_t) content->lastModified; strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ", - gmtime(&(content->lastModified))); + gmtime(&t)); printf("\nKey: %s\n", content->key); printf("Last Modified: %s\n", timebuf); printf("ETag: %s\n", content->eTag); @@ -1104,8 +1137,9 @@ static S3Status listBucketCallback(int isTruncated, const char *nextMarker, } } else { - strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ", - gmtime(&(content->lastModified))); + time_t t = (time_t) content->lastModified; + strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ", + gmtime(&t)); char sizebuf[16]; if 
(content->size < 100000) { sprintf(sizebuf, "%5llu", (unsigned long long) content->size); @@ -1203,9 +1237,9 @@ static void list_bucket(const char *bucketName, const char *prefix, } -static void list(int argc, char **argv, int optind) +static void list(int argc, char **argv, int optindex) { - if (optind == argc) { + if (optindex == argc) { list_service(0); return; } @@ -1214,8 +1248,8 @@ static void list(int argc, char **argv, int optind) const char *prefix = 0, *marker = 0, *delimiter = 0; int maxkeys = 0, allDetails = 0; - while (optind < argc) { - char *param = argv[optind++]; + while (optindex < argc) { + char *param = argv[optindex++]; if (!strncmp(param, PREFIX_PREFIX, PREFIX_PREFIX_LEN)) { prefix = &(param[PREFIX_PREFIX_LEN]); } @@ -1259,10 +1293,12 @@ static void list(int argc, char **argv, int optind) // delete object ------------------------------------------------------------- -static void delete_object(int argc, char **argv, int optind) +static void delete_object(int argc, char **argv, int optindex) { + (void) argc; + // Split bucket/key - char *slash = argv[optind]; + char *slash = argv[optindex]; // We know there is a slash in there, put_object is only called if so while (*slash && (*slash != '/')) { @@ -1270,7 +1306,7 @@ static void delete_object(int argc, char **argv, int optind) } *slash++ = 0; - const char *bucketName = argv[optind++]; + const char *bucketName = argv[optindex++]; const char *key = slash; S3_init(); @@ -1323,8 +1359,8 @@ static int putObjectDataCallback(int bufferSize, char *buffer, int ret = 0; if (data->contentLength) { - int toRead = ((data->contentLength > bufferSize) ? - bufferSize : data->contentLength); + int toRead = ((data->contentLength > (unsigned) bufferSize) ? 
+ (unsigned) bufferSize : data->contentLength); if (data->gb) { growbuffer_read(&(data->gb), toRead, &ret, buffer); } @@ -1350,40 +1386,40 @@ static int putObjectDataCallback(int bufferSize, char *buffer, } -static void put_object(int argc, char **argv, int optind) +static void put_object(int argc, char **argv, int optindex) { - if (optind == argc) { + if (optindex == argc) { fprintf(stderr, "\nERROR: Missing parameter: bucket/key\n"); usageExit(stderr); } // Split bucket/key - char *slash = argv[optind]; + char *slash = argv[optindex]; while (*slash && (*slash != '/')) { slash++; } if (!*slash || !*(slash + 1)) { fprintf(stderr, "\nERROR: Invalid bucket/key name: %s\n", - argv[optind]); + argv[optindex]); usageExit(stderr); } *slash++ = 0; - const char *bucketName = argv[optind++]; + const char *bucketName = argv[optindex++]; const char *key = slash; const char *filename = 0; uint64_t contentLength = 0; const char *cacheControl = 0, *contentType = 0, *md5 = 0; const char *contentDispositionFilename = 0, *contentEncoding = 0; - time_t expires = -1; + int64_t expires = -1; S3CannedAcl cannedAcl = S3CannedAclPrivate; int metaPropertiesCount = 0; S3NameValue metaProperties[S3_MAX_METADATA_COUNT]; int noStatus = 0; - while (optind < argc) { - char *param = argv[optind++]; + while (optindex < argc) { + char *param = argv[optindex++]; if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) { filename = &(param[FILENAME_PREFIX_LEN]); } @@ -1521,7 +1557,7 @@ static void put_object(int argc, char **argv, int optind) exit(-1); } contentLength += amtRead; - if (amtRead < sizeof(buffer)) { + if (amtRead < (int) sizeof(buffer)) { break; } } @@ -1589,59 +1625,59 @@ static void put_object(int argc, char **argv, int optind) // copy object --------------------------------------------------------------- -static void copy_object(int argc, char **argv, int optind) +static void copy_object(int argc, char **argv, int optindex) { - if (optind == argc) { + if (optindex == argc) { 
fprintf(stderr, "\nERROR: Missing parameter: source bucket/key\n"); usageExit(stderr); } // Split bucket/key - char *slash = argv[optind]; + char *slash = argv[optindex]; while (*slash && (*slash != '/')) { slash++; } if (!*slash || !*(slash + 1)) { fprintf(stderr, "\nERROR: Invalid source bucket/key name: %s\n", - argv[optind]); + argv[optindex]); usageExit(stderr); } *slash++ = 0; - const char *sourceBucketName = argv[optind++]; + const char *sourceBucketName = argv[optindex++]; const char *sourceKey = slash; - if (optind == argc) { + if (optindex == argc) { fprintf(stderr, "\nERROR: Missing parameter: " "destination bucket/key\n"); usageExit(stderr); } // Split bucket/key - slash = argv[optind]; + slash = argv[optindex]; while (*slash && (*slash != '/')) { slash++; } if (!*slash || !*(slash + 1)) { fprintf(stderr, "\nERROR: Invalid destination bucket/key name: %s\n", - argv[optind]); + argv[optindex]); usageExit(stderr); } *slash++ = 0; - const char *destinationBucketName = argv[optind++]; + const char *destinationBucketName = argv[optindex++]; const char *destinationKey = slash; const char *cacheControl = 0, *contentType = 0; const char *contentDispositionFilename = 0, *contentEncoding = 0; - time_t expires = -1; + int64_t expires = -1; S3CannedAcl cannedAcl = S3CannedAclPrivate; int metaPropertiesCount = 0; S3NameValue metaProperties[S3_MAX_METADATA_COUNT]; int anyPropertiesSet = 0; - while (optind < argc) { - char *param = argv[optind++]; + while (optindex < argc) { + char *param = argv[optindex++]; if (!strncmp(param, CACHE_CONTROL_PREFIX, CACHE_CONTROL_PREFIX_LEN)) { cacheControl = &(param[CACHE_CONTROL_PREFIX_LEN]); @@ -1749,7 +1785,7 @@ static void copy_object(int argc, char **argv, int optind) &responseCompleteCallback }; - time_t lastModified; + int64_t lastModified; char eTag[256]; do { @@ -1762,8 +1798,9 @@ static void copy_object(int argc, char **argv, int optind) if (statusG == S3StatusOK) { if (lastModified >= 0) { char timebuf[256]; + time_t t = 
(time_t) lastModified; strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ", - gmtime(&lastModified)); + gmtime(&t)); printf("Last-Modified: %s\n", timebuf); } if (eTag[0]) { @@ -1787,39 +1824,40 @@ static S3Status getObjectDataCallback(int bufferSize, const char *buffer, size_t wrote = fwrite(buffer, 1, bufferSize, outfile); - return (wrote < bufferSize) ? S3StatusAbortedByCallback : S3StatusOK; + return ((wrote < (size_t) bufferSize) ? + S3StatusAbortedByCallback : S3StatusOK); } -static void get_object(int argc, char **argv, int optind) +static void get_object(int argc, char **argv, int optindex) { - if (optind == argc) { + if (optindex == argc) { fprintf(stderr, "\nERROR: Missing parameter: bucket/key\n"); usageExit(stderr); } // Split bucket/key - char *slash = argv[optind]; + char *slash = argv[optindex]; while (*slash && (*slash != '/')) { slash++; } if (!*slash || !*(slash + 1)) { fprintf(stderr, "\nERROR: Invalid bucket/key name: %s\n", - argv[optind]); + argv[optindex]); usageExit(stderr); } *slash++ = 0; - const char *bucketName = argv[optind++]; + const char *bucketName = argv[optindex++]; const char *key = slash; const char *filename = 0; - time_t ifModifiedSince = -1, ifNotModifiedSince = -1; + int64_t ifModifiedSince = -1, ifNotModifiedSince = -1; const char *ifMatch = 0, *ifNotMatch = 0; uint64_t startByte = 0, byteCount = 0; - while (optind < argc) { - char *param = argv[optind++]; + while (optindex < argc) { + char *param = argv[optindex++]; if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) { filename = &(param[FILENAME_PREFIX_LEN]); } @@ -1943,9 +1981,9 @@ static void get_object(int argc, char **argv, int optind) // head object --------------------------------------------------------------- -static void head_object(int argc, char **argv, int optind) +static void head_object(int argc, char **argv, int optindex) { - if (optind == argc) { + if (optindex == argc) { fprintf(stderr, "\nERROR: Missing parameter: bucket/key\n"); 
usageExit(stderr); } @@ -1954,23 +1992,23 @@ static void head_object(int argc, char **argv, int optind) showResponsePropertiesG = 1; // Split bucket/key - char *slash = argv[optind]; + char *slash = argv[optindex]; while (*slash && (*slash != '/')) { slash++; } if (!*slash || !*(slash + 1)) { fprintf(stderr, "\nERROR: Invalid bucket/key name: %s\n", - argv[optind]); + argv[optindex]); usageExit(stderr); } *slash++ = 0; - const char *bucketName = argv[optind++]; + const char *bucketName = argv[optindex++]; const char *key = slash; - if (optind != argc) { - fprintf(stderr, "\nERROR: Extraneous parameter: %s\n", argv[optind]); + if (optindex != argc) { + fprintf(stderr, "\nERROR: Extraneous parameter: %s\n", argv[optindex]); usageExit(stderr); } @@ -2004,20 +2042,96 @@ static void head_object(int argc, char **argv, int optind) } +// generate query string ------------------------------------------------------ + +static void generate_query_string(int argc, char **argv, int optindex) +{ + if (optindex == argc) { + fprintf(stderr, "\nERROR: Missing parameter: bucket[/key]\n"); + usageExit(stderr); + } + + const char *bucketName = argv[optindex]; + const char *key = 0; + + // Split bucket/key + char *slash = argv[optindex++]; + while (*slash && (*slash != '/')) { + slash++; + } + if (*slash) { + *slash++ = 0; + key = slash; + } + else { + key = 0; + } + + int64_t expires = -1; + + const char *resource = 0; + + while (optindex < argc) { + char *param = argv[optindex++]; + if (!strncmp(param, EXPIRES_PREFIX, EXPIRES_PREFIX_LEN)) { + expires = parseIso8601Time(&(param[EXPIRES_PREFIX_LEN])); + if (expires < 0) { + fprintf(stderr, "\nERROR: Invalid expires time " + "value; ISO 8601 time format required\n"); + usageExit(stderr); + } + } + else if (!strncmp(param, RESOURCE_PREFIX, RESOURCE_PREFIX_LEN)) { + resource = &(param[RESOURCE_PREFIX_LEN]); + } + else { + fprintf(stderr, "\nERROR: Unknown param: %s\n", param); + usageExit(stderr); + } + } + + S3_init(); + + S3BucketContext 
bucketContext = + { + bucketName, + protocolG, + uriStyleG, + accessKeyIdG, + secretAccessKeyG + }; + + char buffer[S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE]; + + S3Status status = S3_generate_authenticated_query_string + (buffer, &bucketContext, key, expires, resource); + + if (status != S3StatusOK) { + printf("Failed to generate authenticated query string: %s\n", + S3_get_status_name(status)); + } + else { + printf("%s\n", buffer); + } + + S3_deinitialize(); +} + + // get acl ------------------------------------------------------------------- -void get_acl(int argc, char **argv, int optind) +void get_acl(int argc, char **argv, int optindex) { - if (optind == argc) { + if (optindex == argc) { fprintf(stderr, "\nERROR: Missing parameter: bucket[/key]\n"); usageExit(stderr); } - const char *bucketName = argv[optind]; + const char *bucketName = argv[optindex]; const char *key = 0; // Split bucket/key - char *slash = argv[optind++]; + char *slash = argv[optindex++]; while (*slash && (*slash != '/')) { slash++; } @@ -2031,8 +2145,8 @@ void get_acl(int argc, char **argv, int optind) const char *filename = 0; - while (optind < argc) { - char *param = argv[optind++]; + while (optindex < argc) { + char *param = argv[optindex++]; if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) { filename = &(param[FILENAME_PREFIX_LEN]); } @@ -2131,10 +2245,14 @@ void get_acl(int argc, char **argv, int optind) type = "Group"; id = "Authenticated AWS Users"; break; - default: + case S3GranteeTypeAllUsers: type = "Group"; id = "All Users"; break; + default: + type = "Group"; + id = "Log Delivery"; + break; } const char *perm; switch (grant->permission) { @@ -2169,18 +2287,18 @@ void get_acl(int argc, char **argv, int optind) // set acl ------------------------------------------------------------------- -void set_acl(int argc, char **argv, int optind) +void set_acl(int argc, char **argv, int optindex) { - if (optind == argc) { + if (optindex == argc) { fprintf(stderr, "\nERROR: Missing 
parameter: bucket[/key]\n"); usageExit(stderr); } - const char *bucketName = argv[optind]; + const char *bucketName = argv[optindex]; const char *key = 0; // Split bucket/key - char *slash = argv[optind++]; + char *slash = argv[optindex++]; while (*slash && (*slash != '/')) { slash++; } @@ -2194,8 +2312,8 @@ void set_acl(int argc, char **argv, int optind) const char *filename = 0; - while (optind < argc) { - char *param = argv[optind++]; + while (optindex < argc) { + char *param = argv[optindex++]; if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) { filename = &(param[FILENAME_PREFIX_LEN]); } @@ -2267,14 +2385,269 @@ void set_acl(int argc, char **argv, int optind) } +// get logging ---------------------------------------------------------------- + +void get_logging(int argc, char **argv, int optindex) +{ + if (optindex == argc) { + fprintf(stderr, "\nERROR: Missing parameter: bucket\n"); + usageExit(stderr); + } + + const char *bucketName = argv[optindex++]; + const char *filename = 0; + + while (optindex < argc) { + char *param = argv[optindex++]; + if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) { + filename = &(param[FILENAME_PREFIX_LEN]); + } + else { + fprintf(stderr, "\nERROR: Unknown param: %s\n", param); + usageExit(stderr); + } + } + + FILE *outfile = 0; + + if (filename) { + // Stat the file, and if it doesn't exist, open it in w mode + struct stat buf; + if (stat(filename, &buf) == -1) { + outfile = fopen(filename, "w" FOPEN_EXTRA_FLAGS); + } + else { + // Open in r+ so that we don't truncate the file, just in case + // there is an error and we write no bytes, we leave the file + // unmodified + outfile = fopen(filename, "r+" FOPEN_EXTRA_FLAGS); + } + + if (!outfile) { + fprintf(stderr, "\nERROR: Failed to open output file %s: ", + filename); + perror(0); + exit(-1); + } + } + else if (showResponsePropertiesG) { + fprintf(stderr, "\nERROR: getlogging -s requires a filename " + "parameter\n"); + usageExit(stderr); + } + else { + 
outfile = stdout; + } + + int aclGrantCount; + S3AclGrant aclGrants[S3_MAX_ACL_GRANT_COUNT]; + char targetBucket[S3_MAX_BUCKET_NAME_SIZE]; + char targetPrefix[S3_MAX_KEY_SIZE]; + + S3_init(); + + S3BucketContext bucketContext = + { + bucketName, + protocolG, + uriStyleG, + accessKeyIdG, + secretAccessKeyG + }; + + S3ResponseHandler responseHandler = + { + &responsePropertiesCallback, + &responseCompleteCallback + }; + + do { + S3_get_server_access_logging(&bucketContext, targetBucket, targetPrefix, + &aclGrantCount, aclGrants, 0, + &responseHandler, 0); + } while (S3_status_is_retryable(statusG) && should_retry()); + + if (statusG == S3StatusOK) { + if (targetBucket[0]) { + printf("Target Bucket: %s\n", targetBucket); + if (targetPrefix[0]) { + printf("Target Prefix: %s\n", targetPrefix); + } + fprintf(outfile, "%-6s %-90s %-12s\n", " Type", + " User Identifier", + " Permission"); + fprintf(outfile, "------ " + "---------------------------------------------------------" + "--------------------------------- ------------\n"); + int i; + for (i = 0; i < aclGrantCount; i++) { + S3AclGrant *grant = &(aclGrants[i]); + const char *type; + char composedId[S3_MAX_GRANTEE_USER_ID_SIZE + + S3_MAX_GRANTEE_DISPLAY_NAME_SIZE + 16]; + const char *id; + + switch (grant->granteeType) { + case S3GranteeTypeAmazonCustomerByEmail: + type = "Email"; + id = grant->grantee.amazonCustomerByEmail.emailAddress; + break; + case S3GranteeTypeCanonicalUser: + type = "UserID"; + snprintf(composedId, sizeof(composedId), + "%s (%s)", grant->grantee.canonicalUser.id, + grant->grantee.canonicalUser.displayName); + id = composedId; + break; + case S3GranteeTypeAllAwsUsers: + type = "Group"; + id = "Authenticated AWS Users"; + break; + default: + type = "Group"; + id = "All Users"; + break; + } + const char *perm; + switch (grant->permission) { + case S3PermissionRead: + perm = "READ"; + break; + case S3PermissionWrite: + perm = "WRITE"; + break; + case S3PermissionReadACP: + perm = "READ_ACP"; + 
break; + case S3PermissionWriteACP: + perm = "WRITE_ACP"; + break; + default: + perm = "FULL_CONTROL"; + break; + } + fprintf(outfile, "%-6s %-90s %-12s\n", type, id, perm); + } + } + else { + printf("Service logging is not enabled for this bucket.\n"); + } + } + else { + printError(); + } + + fclose(outfile); + + S3_deinitialize(); +} + + +// set logging ---------------------------------------------------------------- + +void set_logging(int argc, char **argv, int optindex) +{ + if (optindex == argc) { + fprintf(stderr, "\nERROR: Missing parameter: bucket\n"); + usageExit(stderr); + } + + const char *bucketName = argv[optindex++]; + + const char *targetBucket = 0, *targetPrefix = 0, *filename = 0; + + while (optindex < argc) { + char *param = argv[optindex++]; + if (!strncmp(param, TARGET_BUCKET_PREFIX, TARGET_BUCKET_PREFIX_LEN)) { + targetBucket = &(param[TARGET_BUCKET_PREFIX_LEN]); + } + else if (!strncmp(param, TARGET_PREFIX_PREFIX, + TARGET_PREFIX_PREFIX_LEN)) { + targetPrefix = &(param[TARGET_PREFIX_PREFIX_LEN]); + } + else if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) { + filename = &(param[FILENAME_PREFIX_LEN]); + } + else { + fprintf(stderr, "\nERROR: Unknown param: %s\n", param); + usageExit(stderr); + } + } + + int aclGrantCount = 0; + S3AclGrant aclGrants[S3_MAX_ACL_GRANT_COUNT]; + + if (targetBucket) { + FILE *infile; + + if (filename) { + if (!(infile = fopen(filename, "r" FOPEN_EXTRA_FLAGS))) { + fprintf(stderr, "\nERROR: Failed to open input file %s: ", + filename); + perror(0); + exit(-1); + } + } + else { + infile = stdin; + } + + // Read in the complete ACL + char aclBuf[65536]; + aclBuf[fread(aclBuf, 1, sizeof(aclBuf), infile)] = 0; + char ownerId[S3_MAX_GRANTEE_USER_ID_SIZE]; + char ownerDisplayName[S3_MAX_GRANTEE_DISPLAY_NAME_SIZE]; + + // Parse it + if (!convert_simple_acl(aclBuf, ownerId, ownerDisplayName, + &aclGrantCount, aclGrants)) { + fprintf(stderr, "\nERROR: Failed to parse ACLs\n"); + fclose(infile); + exit(-1); + } + + 
fclose(infile); + } + + S3_init(); + + S3BucketContext bucketContext = + { + bucketName, + protocolG, + uriStyleG, + accessKeyIdG, + secretAccessKeyG + }; + + S3ResponseHandler responseHandler = + { + &responsePropertiesCallback, + &responseCompleteCallback + }; + + do { + S3_set_server_access_logging(&bucketContext, targetBucket, + targetPrefix, aclGrantCount, aclGrants, + 0, &responseHandler, 0); + } while (S3_status_is_retryable(statusG) && should_retry()); + + if (statusG != S3StatusOK) { + printError(); + } + + S3_deinitialize(); +} + + // main ---------------------------------------------------------------------- int main(int argc, char **argv) { // Parse args while (1) { - int index = 0; - int c = getopt_long(argc, argv, "fhusr:", longOptionsG, &index); + int idx = 0; + int c = getopt_long(argc, argv, "fhusr:", longOptionsG, &idx); if (c == -1) { // End of options @@ -2304,7 +2677,7 @@ int main(int argc, char **argv) break; } default: - fprintf(stderr, "\nERROR: Unknown options: -%c\n", c); + fprintf(stderr, "\nERROR: Unknown option: -%c\n", c); // Usage exit usageExit(stderr); } @@ -2378,12 +2751,21 @@ int main(int argc, char **argv) else if (!strcmp(command, "head")) { head_object(argc, argv, optind); } + else if (!strcmp(command, "gqs")) { + generate_query_string(argc, argv, optind); + } else if (!strcmp(command, "getacl")) { get_acl(argc, argv, optind); } else if (!strcmp(command, "setacl")) { set_acl(argc, argv, optind); } + else if (!strcmp(command, "getlogging")) { + get_logging(argc, argv, optind); + } + else if (!strcmp(command, "setlogging")) { + set_logging(argc, argv, optind); + } else { fprintf(stderr, "Unknown command: %s\n", command); return -1; diff --git a/src/service.c b/src/service.c index edb5347..216b981 100644 --- a/src/service.c +++ b/src/service.c @@ -158,14 +158,14 @@ void S3_list_service(S3Protocol protocol, const char *accessKeyId, RequestParams params = { HttpRequestTypeGET, // httpRequestType - protocol, // protocol - 
S3UriStylePath, // uriStyle - 0, // bucketName + { 0, // bucketName + protocol, // protocol + S3UriStylePath, // uriStyle + accessKeyId, // accessKeyId + secretAccessKey }, // secretAccessKey 0, // key 0, // queryParams 0, // subResource - accessKeyId, // accessKeyId - secretAccessKey, // secretAccessKey 0, // copySourceBucketName 0, // copySourceKey 0, // getConditions diff --git a/src/service_access_logging.c b/src/service_access_logging.c new file mode 100644 index 0000000..cbed2c1 --- /dev/null +++ b/src/service_access_logging.c @@ -0,0 +1,556 @@ +/** ************************************************************************** + * server_access_logging.c + * + * Copyright 2008 Bryan Ischo <bryan@ischo.com> + * + * This file is part of libs3. + * + * libs3 is free software: you can redistribute it and/or modify it under the + * terms of the GNU General Public License as published by the Free Software + * Foundation, version 3 of the License. + * + * In addition, as a special exception, the copyright holders give + * permission to link the code of this library and its programs with the + * OpenSSL library, and distribute linked combinations including the two. + * + * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License version 3 + * along with libs3, in a file named COPYING. If not, see + * <http://www.gnu.org/licenses/>. 
+ * + ************************************************************************** **/ + +#include <stdlib.h> +#include <string.h> +#include "libs3.h" +#include "request.h" + + +// get server access logging--------------------------------------------------- + +typedef struct ConvertBlsData +{ + char *targetBucketReturn; + int targetBucketReturnLen; + char *targetPrefixReturn; + int targetPrefixReturnLen; + int *aclGrantCountReturn; + S3AclGrant *aclGrants; + + string_buffer(emailAddress, S3_MAX_GRANTEE_EMAIL_ADDRESS_SIZE); + string_buffer(userId, S3_MAX_GRANTEE_USER_ID_SIZE); + string_buffer(userDisplayName, S3_MAX_GRANTEE_DISPLAY_NAME_SIZE); + string_buffer(groupUri, 128); + string_buffer(permission, 32); +} ConvertBlsData; + + +static S3Status convertBlsXmlCallback(const char *elementPath, + const char *data, int dataLen, + void *callbackData) +{ + ConvertBlsData *caData = (ConvertBlsData *) callbackData; + + int fit; + + if (data) { + if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/" + "TargetBucket")) { + caData->targetBucketReturnLen += + snprintf(&(caData->targetBucketReturn + [caData->targetBucketReturnLen]), + 255 - caData->targetBucketReturnLen - 1, + "%.*s", dataLen, data); + if (caData->targetBucketReturnLen >= 255) { + return S3StatusTargetBucketTooLong; + } + } + else if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/" + "TargetPrefix")) { + caData->targetPrefixReturnLen += + snprintf(&(caData->targetPrefixReturn + [caData->targetPrefixReturnLen]), + 255 - caData->targetPrefixReturnLen - 1, + "%.*s", dataLen, data); + if (caData->targetPrefixReturnLen >= 255) { + return S3StatusTargetPrefixTooLong; + } + } + else if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/" + "TargetGrants/Grant/Grantee/EmailAddress")) { + // AmazonCustomerByEmail + string_buffer_append(caData->emailAddress, data, dataLen, fit); + if (!fit) { + return S3StatusEmailAddressTooLong; + } + } + else if (!strcmp(elementPath, + 
"AccessControlPolicy/AccessControlList/Grant/" + "Grantee/ID")) { + // CanonicalUser + string_buffer_append(caData->userId, data, dataLen, fit); + if (!fit) { + return S3StatusUserIdTooLong; + } + } + else if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/" + "TargetGrants/Grant/Grantee/DisplayName")) { + // CanonicalUser + string_buffer_append(caData->userDisplayName, data, dataLen, fit); + if (!fit) { + return S3StatusUserDisplayNameTooLong; + } + } + else if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/" + "TargetGrants/Grant/Grantee/URI")) { + // Group + string_buffer_append(caData->groupUri, data, dataLen, fit); + if (!fit) { + return S3StatusGroupUriTooLong; + } + } + else if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/" + "TargetGrants/Grant/Permission")) { + // Permission + string_buffer_append(caData->permission, data, dataLen, fit); + if (!fit) { + return S3StatusPermissionTooLong; + } + } + } + else { + if (!strcmp(elementPath, "BucketLoggingStatus/LoggingEnabled/" + "TargetGrants/Grant")) { + // A grant has just been completed; so add the next S3AclGrant + // based on the values read + if (*(caData->aclGrantCountReturn) == S3_MAX_ACL_GRANT_COUNT) { + return S3StatusTooManyGrants; + } + + S3AclGrant *grant = &(caData->aclGrants + [*(caData->aclGrantCountReturn)]); + + if (caData->emailAddress[0]) { + grant->granteeType = S3GranteeTypeAmazonCustomerByEmail; + strcpy(grant->grantee.amazonCustomerByEmail.emailAddress, + caData->emailAddress); + } + else if (caData->userId[0] && caData->userDisplayName[0]) { + grant->granteeType = S3GranteeTypeCanonicalUser; + strcpy(grant->grantee.canonicalUser.id, caData->userId); + strcpy(grant->grantee.canonicalUser.displayName, + caData->userDisplayName); + } + else if (caData->groupUri[0]) { + if (!strcmp(caData->groupUri, + "http://acs.amazonaws.com/groups/global/" + "AuthenticatedUsers")) { + grant->granteeType = S3GranteeTypeAllAwsUsers; + } + else if (!strcmp(caData->groupUri, 
+ "http://acs.amazonaws.com/groups/global/" + "AllUsers")) { + grant->granteeType = S3GranteeTypeAllUsers; + } + else { + return S3StatusBadGrantee; + } + } + else { + return S3StatusBadGrantee; + } + + if (!strcmp(caData->permission, "READ")) { + grant->permission = S3PermissionRead; + } + else if (!strcmp(caData->permission, "WRITE")) { + grant->permission = S3PermissionWrite; + } + else if (!strcmp(caData->permission, "READ_ACP")) { + grant->permission = S3PermissionReadACP; + } + else if (!strcmp(caData->permission, "WRITE_ACP")) { + grant->permission = S3PermissionWriteACP; + } + else if (!strcmp(caData->permission, "FULL_CONTROL")) { + grant->permission = S3PermissionFullControl; + } + else { + return S3StatusBadPermission; + } + + (*(caData->aclGrantCountReturn))++; + + string_buffer_initialize(caData->emailAddress); + string_buffer_initialize(caData->userId); + string_buffer_initialize(caData->userDisplayName); + string_buffer_initialize(caData->groupUri); + string_buffer_initialize(caData->permission); + } + } + + return S3StatusOK; +} + + +static S3Status convert_bls(char *blsXml, char *targetBucketReturn, + char *targetPrefixReturn, int *aclGrantCountReturn, + S3AclGrant *aclGrants) +{ + ConvertBlsData data; + + data.targetBucketReturn = targetBucketReturn; + data.targetBucketReturn[0] = 0; + data.targetBucketReturnLen = 0; + data.targetPrefixReturn = targetPrefixReturn; + data.targetPrefixReturn[0] = 0; + data.targetPrefixReturnLen = 0; + data.aclGrantCountReturn = aclGrantCountReturn; + data.aclGrants = aclGrants; + *aclGrantCountReturn = 0; + string_buffer_initialize(data.emailAddress); + string_buffer_initialize(data.userId); + string_buffer_initialize(data.userDisplayName); + string_buffer_initialize(data.groupUri); + string_buffer_initialize(data.permission); + + // Use a simplexml parser + SimpleXml simpleXml; + simplexml_initialize(&simpleXml, &convertBlsXmlCallback, &data); + + S3Status status = simplexml_add(&simpleXml, blsXml, strlen(blsXml)); 
+ + simplexml_deinitialize(&simpleXml); + + return status; +} + + +// Use a rather arbitrary max size for the document of 64K +#define BLS_XML_DOC_MAXSIZE (64 * 1024) + + +typedef struct GetBlsData +{ + SimpleXml simpleXml; + + S3ResponsePropertiesCallback *responsePropertiesCallback; + S3ResponseCompleteCallback *responseCompleteCallback; + void *callbackData; + + char *targetBucketReturn; + char *targetPrefixReturn; + int *aclGrantCountReturn; + S3AclGrant *aclGrants; + string_buffer(blsXmlDocument, BLS_XML_DOC_MAXSIZE); +} GetBlsData; + + +static S3Status getBlsPropertiesCallback + (const S3ResponseProperties *responseProperties, void *callbackData) +{ + GetBlsData *gsData = (GetBlsData *) callbackData; + + return (*(gsData->responsePropertiesCallback)) + (responseProperties, gsData->callbackData); +} + + +static S3Status getBlsDataCallback(int bufferSize, const char *buffer, + void *callbackData) +{ + GetBlsData *gsData = (GetBlsData *) callbackData; + + int fit; + + string_buffer_append(gsData->blsXmlDocument, buffer, bufferSize, fit); + + return fit ? 
S3StatusOK : S3StatusXmlDocumentTooLarge; +} + + +static void getBlsCompleteCallback(S3Status requestStatus, + const S3ErrorDetails *s3ErrorDetails, + void *callbackData) +{ + GetBlsData *gsData = (GetBlsData *) callbackData; + + if (requestStatus == S3StatusOK) { + // Parse the document + requestStatus = convert_bls + (gsData->blsXmlDocument, gsData->targetBucketReturn, + gsData->targetPrefixReturn, gsData->aclGrantCountReturn, + gsData->aclGrants); + } + + (*(gsData->responseCompleteCallback)) + (requestStatus, s3ErrorDetails, gsData->callbackData); + + free(gsData); +} + + +void S3_get_server_access_logging(const S3BucketContext *bucketContext, + char *targetBucketReturn, + char *targetPrefixReturn, + int *aclGrantCountReturn, + S3AclGrant *aclGrants, + S3RequestContext *requestContext, + const S3ResponseHandler *handler, + void *callbackData) +{ + // Create the callback data + GetBlsData *gsData = (GetBlsData *) malloc(sizeof(GetBlsData)); + if (!gsData) { + (*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData); + return; + } + + gsData->responsePropertiesCallback = handler->propertiesCallback; + gsData->responseCompleteCallback = handler->completeCallback; + gsData->callbackData = callbackData; + + gsData->targetBucketReturn = targetBucketReturn; + gsData->targetPrefixReturn = targetPrefixReturn; + gsData->aclGrantCountReturn = aclGrantCountReturn; + gsData->aclGrants = aclGrants; + string_buffer_initialize(gsData->blsXmlDocument); + *aclGrantCountReturn = 0; + + // Set up the RequestParams + RequestParams params = + { + HttpRequestTypeGET, // httpRequestType + { bucketContext->bucketName, // bucketName + bucketContext->protocol, // protocol + bucketContext->uriStyle, // uriStyle + bucketContext->accessKeyId, // accessKeyId + bucketContext->secretAccessKey }, // secretAccessKey + 0, // key + 0, // queryParams + "logging", // subResource + 0, // copySourceBucketName + 0, // copySourceKey + 0, // getConditions + 0, // startByte + 0, // byteCount + 
0, // putProperties + &getBlsPropertiesCallback, // propertiesCallback + 0, // toS3Callback + 0, // toS3CallbackTotalSize + &getBlsDataCallback, // fromS3Callback + &getBlsCompleteCallback, // completeCallback + gsData // callbackData + }; + + // Perform the request + request_perform(¶ms, requestContext); +} + + + +// set server access logging--------------------------------------------------- + +static S3Status generateSalXmlDocument(const char *targetBucket, + const char *targetPrefix, + int aclGrantCount, + const S3AclGrant *aclGrants, + int *xmlDocumentLenReturn, + char *xmlDocument, + int xmlDocumentBufferSize) +{ + *xmlDocumentLenReturn = 0; + +#define append(fmt, ...) \ + do { \ + *xmlDocumentLenReturn += snprintf \ + (&(xmlDocument[*xmlDocumentLenReturn]), \ + xmlDocumentBufferSize - *xmlDocumentLenReturn - 1, \ + fmt, __VA_ARGS__); \ + if (*xmlDocumentLenReturn >= xmlDocumentBufferSize) { \ + return S3StatusXmlDocumentTooLarge; \ + } \ + } while (0) + + append("%s", "<BucketLoggingStatus " + "xmlns=\"http://doc.s3.amazonaws.com/2006-03-01\">"); + + if (targetBucket && targetBucket[0]) { + append("<LoggingEnabled><TargetBucket>%s</TargetBucket>", targetBucket); + append("<TargetPrefix>%s</TargetPrefix>", + targetPrefix ? 
targetPrefix : ""); + + if (aclGrantCount) { + append("%s", "<TargetGrants>"); + int i; + for (i = 0; i < aclGrantCount; i++) { + append("%s", "<Grant><Grantee " + "xmlns:xsi=\"http://www.w3.org/2001/" + "XMLSchema-instance\" xsi:type=\""); + const S3AclGrant *grant = &(aclGrants[i]); + switch (grant->granteeType) { + case S3GranteeTypeAmazonCustomerByEmail: + append("AmazonCustomerByEmail\"><EmailAddress>%s" + "</EmailAddress>", + grant->grantee.amazonCustomerByEmail.emailAddress); + break; + case S3GranteeTypeCanonicalUser: + append("CanonicalUser\"><ID>%s</ID><DisplayName>%s" + "</DisplayName>", + grant->grantee.canonicalUser.id, + grant->grantee.canonicalUser.displayName); + break; + default: // case S3GranteeTypeAllAwsUsers/S3GranteeTypeAllUsers: + append("Group\"><URI>http://acs.amazonaws.com/groups/" + "global/%s</URI>", + (grant->granteeType == S3GranteeTypeAllAwsUsers) ? + "AuthenticatedUsers" : "AllUsers"); + break; + } + append("</Grantee><Permission>%s</Permission></Grant>", + ((grant->permission == S3PermissionRead) ? "READ" : + (grant->permission == S3PermissionWrite) ? "WRITE" : + (grant->permission == + S3PermissionReadACP) ? "READ_ACP" : + (grant->permission == + S3PermissionWriteACP) ? 
"WRITE_ACP" : "FULL_CONTROL")); + } + append("%s", "</TargetGrants>"); + } + append("%s", "</LoggingEnabled>"); + } + + append("%s", "</BucketLoggingStatus>"); + + return S3StatusOK; +} + + +typedef struct SetSalData +{ + S3ResponsePropertiesCallback *responsePropertiesCallback; + S3ResponseCompleteCallback *responseCompleteCallback; + void *callbackData; + + int salXmlDocumentLen; + char salXmlDocument[BLS_XML_DOC_MAXSIZE]; + int salXmlDocumentBytesWritten; + +} SetSalData; + + +static S3Status setSalPropertiesCallback + (const S3ResponseProperties *responseProperties, void *callbackData) +{ + SetSalData *paData = (SetSalData *) callbackData; + + return (*(paData->responsePropertiesCallback)) + (responseProperties, paData->callbackData); +} + + +static int setSalDataCallback(int bufferSize, char *buffer, void *callbackData) +{ + SetSalData *paData = (SetSalData *) callbackData; + + int remaining = (paData->salXmlDocumentLen - + paData->salXmlDocumentBytesWritten); + + int toCopy = bufferSize > remaining ? 
remaining : bufferSize; + + if (!toCopy) { + return 0; + } + + memcpy(buffer, &(paData->salXmlDocument + [paData->salXmlDocumentBytesWritten]), toCopy); + + paData->salXmlDocumentBytesWritten += toCopy; + + return toCopy; +} + + +static void setSalCompleteCallback(S3Status requestStatus, + const S3ErrorDetails *s3ErrorDetails, + void *callbackData) +{ + SetSalData *paData = (SetSalData *) callbackData; + + (*(paData->responseCompleteCallback)) + (requestStatus, s3ErrorDetails, paData->callbackData); + + free(paData); +} + + +void S3_set_server_access_logging(const S3BucketContext *bucketContext, + const char *targetBucket, + const char *targetPrefix, int aclGrantCount, + const S3AclGrant *aclGrants, + S3RequestContext *requestContext, + const S3ResponseHandler *handler, + void *callbackData) +{ + if (aclGrantCount > S3_MAX_ACL_GRANT_COUNT) { + (*(handler->completeCallback)) + (S3StatusTooManyGrants, 0, callbackData); + return; + } + + SetSalData *data = (SetSalData *) malloc(sizeof(SetSalData)); + if (!data) { + (*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData); + return; + } + + // Convert aclGrants to XML document + S3Status status = generateSalXmlDocument + (targetBucket, targetPrefix, aclGrantCount, aclGrants, + &(data->salXmlDocumentLen), data->salXmlDocument, + sizeof(data->salXmlDocument)); + if (status != S3StatusOK) { + free(data); + (*(handler->completeCallback))(status, 0, callbackData); + return; + } + + data->responsePropertiesCallback = handler->propertiesCallback; + data->responseCompleteCallback = handler->completeCallback; + data->callbackData = callbackData; + + data->salXmlDocumentBytesWritten = 0; + + // Set up the RequestParams + RequestParams params = + { + HttpRequestTypePUT, // httpRequestType + { bucketContext->bucketName, // bucketName + bucketContext->protocol, // protocol + bucketContext->uriStyle, // uriStyle + bucketContext->accessKeyId, // accessKeyId + bucketContext->secretAccessKey }, // secretAccessKey + 0, // 
key + 0, // queryParams + "logging", // subResource + 0, // copySourceBucketName + 0, // copySourceKey + 0, // getConditions + 0, // startByte + 0, // byteCount + 0, // putProperties + &setSalPropertiesCallback, // propertiesCallback + &setSalDataCallback, // toS3Callback + data->salXmlDocumentLen, // toS3CallbackTotalSize + 0, // fromS3Callback + &setSalCompleteCallback, // completeCallback + data // callbackData + }; + + // Perform the request + request_perform(¶ms, requestContext); +} diff --git a/src/simplexml.c b/src/simplexml.c index 12eda26..bd8616b 100644 --- a/src/simplexml.c +++ b/src/simplexml.c @@ -47,6 +47,8 @@ static xmlEntityPtr saxGetEntity(void *user_data, const xmlChar *name) { + (void) user_data; + return xmlGetPredefinedEntity(name); } @@ -54,6 +56,8 @@ static xmlEntityPtr saxGetEntity(void *user_data, const xmlChar *name) static void saxStartElement(void *user_data, const xmlChar *nameUtf8, const xmlChar **attr) { + (void) attr; + SimpleXml *simpleXml = (SimpleXml *) user_data; if (simpleXml->status != S3StatusOK) { @@ -67,7 +71,7 @@ static void saxStartElement(void *user_data, const xmlChar *nameUtf8, int len = strlen(name); if ((simpleXml->elementPathLen + len + 1) >= - sizeof(simpleXml->elementPath)) { + (int) sizeof(simpleXml->elementPath)) { // Cannot handle this element, stop! simpleXml->status = S3StatusXmlParseFailure; return; @@ -83,6 +87,8 @@ static void saxStartElement(void *user_data, const xmlChar *nameUtf8, static void saxEndElement(void *user_data, const xmlChar *name) { + (void) name; + SimpleXml *simpleXml = (SimpleXml *) user_data; if (simpleXml->status != S3StatusOK) { @@ -117,6 +123,8 @@ static void saxCharacters(void *user_data, const xmlChar *ch, int len) static void saxError(void *user_data, const char *msg, ...) 
{ + (void) msg; + SimpleXml *simpleXml = (SimpleXml *) user_data; if (simpleXml->status != S3StatusOK) { diff --git a/src/testsimplexml.c b/src/testsimplexml.c index 7210074..57fba7d 100644 --- a/src/testsimplexml.c +++ b/src/testsimplexml.c @@ -33,6 +33,8 @@ static S3Status simpleXmlCallback(const char *elementPath, const char *data, int dataLen, void *callbackData) { + (void) callbackData; + printf("[%s]: [%.*s]\n", elementPath, dataLen, data); return S3StatusOK; @@ -95,7 +95,7 @@ int urlEncode(char *dest, const char *src, int maxSrcSize) } -time_t parseIso8601Time(const char *str) +int64_t parseIso8601Time(const char *str) { // Check to make sure that it has a valid format if (!checkString(str, "dddd-dd-ddTdd:dd:dd")) { @@ -130,7 +130,7 @@ time_t parseIso8601Time(const char *str) stm.tm_isdst = -1; - time_t ret = mktime(&stm); + int64_t ret = mktime(&stm); // Skip the millis @@ -173,12 +173,12 @@ uint64_t parseUnsignedInt(const char *str) } -int base64Encode(const unsigned char *in, int inLen, unsigned char *out) +int base64Encode(const unsigned char *in, int inLen, char *out) { static const char *ENC = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; - unsigned char *original_out = out; + char *original_out = out; while (inLen) { // first 6 bits of char 1 |