aws lambda function getting access denied when getObject from s3

Submitted by Deadly on 2019-11-30 06:31:27

Question


I am getting an access denied error from the AWS S3 service in my Lambda function.

This is the code:

// dependencies
var async = require('async');
var AWS = require('aws-sdk');
var gm = require('gm').subClass({ imageMagick: true }); // Enable ImageMagick integration.

exports.handler = function(event, context) {
    var srcBucket = event.Records[0].s3.bucket.name;
    // Object key may have spaces or unicode non-ASCII characters.
    var key = decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, " "));
/*
{
    originalFilename: <string>,
    versions: [
        {
            size: <number>,
            crop: [x,y],
            max: [x, y],
            rotate: <number>
        }
    ]
}*/
    var fileInfo;
    var dstBucket = "xmovo.transformedimages.develop";
    try {
        //TODO: Decompress and decode the returned value
        fileInfo = JSON.parse(key);
        //download s3File

        // get reference to S3 client
        var s3 = new AWS.S3();

        // Download the image from S3 into a buffer.
        s3.getObject({
                Bucket: srcBucket,
                Key: key
            },
            function (err, response) {
                if (err) {
                    console.log("Error getting from s3: >>> " + err + "::: Bucket-Key >>>" + srcBucket + "-" + key + ":::Principal>>>" + event.Records[0].userIdentity.principalId, err.stack);
                    return;
                }

                // Infer the image type.
                var img = gm(response.Body);
                var imageType = null;
                img.identify(function (err, data) {
                    if (err) {
                        console.log("Error image type: >>> " + err);
                        deleteFromS3(srcBucket, key);
                        return;
                    }
                    imageType = data.format;

                    //foreach of the versions requested
                    async.each(fileInfo.versions, function (currentVersion, callback) {
                        //apply transform
                        async.waterfall([async.apply(transform, response, currentVersion), uploadToS3], callback);

                    }, function (err) {
                        if (err) console.log("Error on excecution of watefall: >>> " + err);
                        else {
                            //when all done then delete the original image from srcBucket
                            deleteFromS3(srcBucket, key);
                        }
                    });
                });
            });
    }
    catch (ex){
        context.fail("exception through: " + ex);
        deleteFromS3(srcBucket, key);
        return;
    }
        function transform(response, version, callback){
            var imageProcess = gm(response.Body);
            if (version.rotate!=0) imageProcess = imageProcess.rotate("black",version.rotate);
            if(version.size!=null) {
                if (version.crop != null) {
                    //crop the image from the coordinates
                    imageProcess=imageProcess.crop(version.size[0], version.size[1], version.crop[0], version.crop[1]);
                }
                else {
                    //find the bigger and resize proportioned the other dimension
                    var widthIsMax = version.size[0]>version.size[1];
                    var maxValue = Math.max(version.size[0],version.size[1]);
                    imageProcess=(widthIsMax)?imageProcess.resize(maxValue):imageProcess.resize(null, maxValue);
                }
            }


            //finally convert the image to jpg 90%
            imageProcess.toBuffer("jpg",{quality:90}, function(err, buffer){
                if (err) return callback(err);
                callback(null, version, "image/jpeg", buffer);
            });

        }

        function deleteFromS3(bucket, filename){
            s3.deleteObject({
                Bucket: bucket,
                Key: filename
            }, function (err) {
                // without a callback the SDK request is never sent
                if (err) console.log("Error deleting from s3: >>> " + err);
            });
        }

        function uploadToS3(version, contentType, data, callback) {
            // Stream the transformed image to a different S3 bucket.
            var dstKey = fileInfo.originalFilename + "_" + version.size + ".jpg";
            s3.putObject({
                Bucket: dstBucket,
                Key: dstKey,
                Body: data,
                ContentType: contentType
            }, callback);
        }
};

This is the error on Cloudwatch:

AccessDenied: Access Denied

This is the stack error:

at Request.extractError (/var/runtime/node_modules/aws-sdk/lib/services/s3.js:329:35)
at Request.callListeners (/var/runtime/node_modules/aws-sdk/lib/sequential_executor.js:105:20)
at Request.emit (/var/runtime/node_modules/aws-sdk/lib/sequential_executor.js:77:10)
at Request.emit (/var/runtime/node_modules/aws-sdk/lib/request.js:596:14)
at Request.transition (/var/runtime/node_modules/aws-sdk/lib/request.js:21:10)
at AcceptorStateMachine.runTo (/var/runtime/node_modules/aws-sdk/lib/state_machine.js:14:12)
at /var/runtime/node_modules/aws-sdk/lib/state_machine.js:26:10
at Request.<anonymous> (/var/runtime/node_modules/aws-sdk/lib/request.js:37:9)
at Request.<anonymous> (/var/runtime/node_modules/aws-sdk/lib/request.js:598:12)
at Request.callListeners (/var/runtime/node_modules/aws-sdk/lib/sequential_executor.js:115:18)

There is no other description or info. The S3 bucket permissions allow everyone to put, list, and delete.

What can I do to access the S3 bucket?

PS: in the Lambda event properties the principal is correct and has administrative privileges.


Answer 1:


Your Lambda execution role does not have the required privileges (s3:GetObject).

Go to the IAM dashboard and check the role associated with your Lambda execution. If you used the AWS wizard, it automatically creates a role called oneClick_lambda_s3_exec_role. Click on Show Policy. It should show something similar to the attached image. Make sure s3:GetObject is listed.
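As a rough sketch of what that policy needs to contain (the Sid values and SOURCE-BUCKET-NAME are placeholders; the destination bucket name is taken from the question's code), the execution role should allow reading and deleting the original object and writing the transformed one:

{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "ReadAndDeleteSourceObjects",
            "Effect": "Allow",
            "Action": [
                "s3:GetObject",
                "s3:DeleteObject"
            ],
            "Resource": "arn:aws:s3:::SOURCE-BUCKET-NAME/*"
        },
        {
            "Sid": "WriteTransformedObjects",
            "Effect": "Allow",
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::xmovo.transformedimages.develop/*"
        }
    ]
}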




Answer 2:


I ran into this issue and after hours of IAM policy madness, the solution was to:

  1. Go to S3 console
  2. Click bucket you are interested in.
  3. Click 'Properties'
  4. Unfold 'Permissions'
  5. Click 'Add more permissions'
  6. Choose 'Any Authenticated AWS User' from the dropdown. Select 'Upload/Delete' and 'List' (or whatever you need for your Lambda).
  7. Click 'Save'

Done. Your carefully written IAM role policies don't matter, and neither do specific bucket policies (I've written those too, trying to make it work). Or maybe they just don't work on my account, who knows.

[EDIT]

After a lot of tinkering, I found that the above approach is not the best. Try this:

  1. Keep your role policy as in the helloV post.
  2. Go to S3. Select your bucket. Click Permissions. Click Bucket Policy.
  3. Try something like this:
{
    "Version": "2012-10-17",
    "Id": "Lambda access bucket policy",
    "Statement": [
        {
            "Sid": "All on objects in bucket lambda",
            "Effect": "Allow",
            "Principal": {
                "AWS": "arn:aws:iam::AWSACCOUNTID:root"
            },
            "Action": "s3:*",
            "Resource": "arn:aws:s3:::BUCKET-NAME/*"
        },
        {
            "Sid": "All on bucket by lambda",
            "Effect": "Allow",
            "Principal": {
                "AWS": "arn:aws:iam::AWSACCOUNTID:root"
            },
            "Action": "s3:*",
            "Resource": "arn:aws:s3:::BUCKET-NAME"
        }
    ]
}

This worked for me and does not require you to share the bucket with all authenticated AWS users (which most of the time is not ideal).




Answer 3:


Interestingly enough, AWS returns 403 (access denied) when the file does not exist. Be sure the target file is in the S3 bucket.




Answer 4:


If you are specifying the Resource, don't forget to add the sub-folder (object) specification as well, like this:

"Resource": [
  "arn:aws:s3:::BUCKET-NAME",
  "arn:aws:s3:::BUCKET-NAME/*"
]
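For context (the action names here are only illustrative), bucket-level actions such as s3:ListBucket match the bucket ARN, while object-level actions such as s3:GetObject match the /* form, which is why a statement typically lists both resources:

{
    "Effect": "Allow",
    "Action": [
        "s3:ListBucket",
        "s3:GetObject"
    ],
    "Resource": [
        "arn:aws:s3:::BUCKET-NAME",
        "arn:aws:s3:::BUCKET-NAME/*"
    ]
}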



Answer 5:


I too ran into this issue; I fixed it by granting s3:GetObject* in the policy, as the request was attempting to obtain a version of that object.
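For illustration only (the Sid and bucket name are placeholders), the wildcard expands to cover both s3:GetObject and s3:GetObjectVersion in a statement along these lines:

{
    "Sid": "AllowObjectReadsIncludingVersions",
    "Effect": "Allow",
    "Action": "s3:GetObject*",
    "Resource": "arn:aws:s3:::BUCKET-NAME/*"
}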




Answer 6:


If all the other policy ducks are in a row, S3 will still return an Access Denied message if the object doesn't exist AND the requester doesn't have the s3:ListBucket permission on the bucket.

From https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html:

...If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

If you have the s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status code 404 ("no such key") error. If you don't have the s3:ListBucket permission, Amazon S3 will return an HTTP status code 403 ("access denied") error.
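As a sketch (BUCKET-NAME is a placeholder), adding a statement like this to the role policy grants s3:ListBucket so a missing key surfaces as a 404 rather than a misleading 403; note that s3:ListBucket is attached to the bucket ARN itself, not to BUCKET-NAME/*:

{
    "Sid": "AllowBucketListing",
    "Effect": "Allow",
    "Action": "s3:ListBucket",
    "Resource": "arn:aws:s3:::BUCKET-NAME"
}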




Answer 7:


I tried to execute a basic blueprint Python Lambda function [example code] and had the same issue. My execution role was lambda_basic_execution.

I went to S3 > (my bucket name here) > Permissions.

Because I'm a beginner, I used the Policy Generator provided by Amazon rather than writing JSON myself: http://awspolicygen.s3.amazonaws.com/policygen.html. My JSON looks like this:

{
    "Id": "Policy153536723xxxx",
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "Stmt153536722xxxx",
            "Action": [
                "s3:GetObject"
            ],
            "Effect": "Allow",
            "Resource": "arn:aws:s3:::tokabucket/*",
            "Principal": {
                "AWS": [
                    "arn:aws:iam::82557712xxxx:role/lambda_basic_execution"
                ]
            }
        }
    ]
}

And then the code executed nicely.




Answer 8:


I was trying to read a file from S3 and create a new file by changing the content of the file I read (Lambda + Node). Reading the file from S3 did not pose any problem. As soon as I tried writing to the S3 bucket, I got an 'Access Denied' error.

I tried everything listed above but couldn't get rid of 'Access Denied'. Finally I was able to get it working by giving the 'List Objects' permission to everyone on my bucket.

Obviously this is not the best approach, but nothing else worked.




Answer 9:


If you have encryption set on your S3 bucket (such as AWS KMS), you may need to make sure the IAM role applied to your Lambda function is added to the list of IAM > Encryption keys > region > key > Key Users for the corresponding key that you used to encrypt your S3 bucket at rest.

In my screenshot, for example, I added the CyclopsApplicationLambdaRole role that I have applied to my Lambda function as a Key User in IAM for the same AWS KMS key that I used to encrypt my S3 bucket. Don't forget to select the correct region for your key when you open up the Encryption keys UI.

Find the execution role you've applied to your Lambda function:

Find the key you used to add encryption to your S3 bucket:

In IAM > Encryption keys, choose your region and click on the key name:

Add the role as a Key User in IAM Encryption keys for the key specified in S3:
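For orientation only, and reusing the role name and the AWSACCOUNTID placeholder from this thread (not verified values), adding the role as a key user amounts to a key-policy statement roughly like the one below; at minimum the role needs kms:Decrypt to read objects encrypted with SSE-KMS:

{
    "Sid": "Allow use of the key by the Lambda role",
    "Effect": "Allow",
    "Principal": {
        "AWS": "arn:aws:iam::AWSACCOUNTID:role/CyclopsApplicationLambdaRole"
    },
    "Action": [
        "kms:Decrypt",
        "kms:DescribeKey"
    ],
    "Resource": "*"
}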




Answer 10:


I was struggling with this issue for hours. I was using AmazonS3EncryptionClient and nothing I did helped. Then I noticed that the client is actually deprecated, so I thought I'd try switching to the builder model they have:

// build the encryption client via the builder instead of the deprecated AmazonS3EncryptionClient constructor
var builder = AmazonS3EncryptionClientBuilder.standard()
  .withEncryptionMaterials(new StaticEncryptionMaterialsProvider(encryptionMaterials))
// only override credentials when explicit keys are supplied; otherwise the default provider chain (the Lambda role) is used
if (accessKey.nonEmpty && secretKey.nonEmpty) builder = builder.withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials(accessKey.get, secretKey.get)))
builder.build()

And... that solved it. Looks like Lambda has trouble injecting the credentials in the old model, but works well in the new one.



Source: https://stackoverflow.com/questions/35589641/aws-lambda-function-getting-access-denied-when-getobject-from-s3
