Question
I am trying to resize images with a CloudFront distribution according to this article: https://aws.amazon.com/tr/blogs/networking-and-content-delivery/resizing-images-with-amazon-cloudfront-lambdaedge-aws-cdn-blog/
I created a project folder with the Origin-Response and Viewer-Request functions given in the article, downloaded the dependencies, and deployed the zip package with the CloudFormation template.
The IAM role, S3 bucket, bucket policy, and distribution with the Lambda@Edge functions were created without any error, and they all seem compatible.
But I get the error below when I try to resize an image in the origin bucket:
"503 ERROR The request could not be satisfied. The Lambda function associated with the CloudFront distribution is invalid or doesn't have the required permissions. "
I also don't see anything in monitoring, which means my functions are not invoked.
I created another admin role with the "AdministratorAccess" policy and trust relationships for "edgelambda.amazonaws.com" and "lambda.amazonaws.com".
I changed my bucket policy to allow full public access.
I can view the image, but I still get the 503 error when I try to resize by adding a query string to the CloudFront distribution URL:
"xxxxxxxxx.net/images/pexels.jpeg?d=100x100"
Here are my bucket, bucket policy, IAM Roles and functions.
Bucket name : image-resize-488052071209-us-east-1
Bucket policy:
{
    "Version": "2008-10-17",
    "Statement": [
        {
            "Sid": "AllowPublicRead",
            "Effect": "Allow",
            "Principal": {
                "AWS": "*"
            },
            "Action": "s3:GetObject",
            "Resource": "arn:aws:s3:::image-resize-488052071209-us-east-1/*"
        },
        {
            "Effect": "Allow",
            "Principal": {
                "AWS": "*"
            },
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::image-resize-488052071209-us-east-1/*"
        },
        {
            "Effect": "Allow",
            "Principal": {
                "AWS": "*"
            },
            "Action": "s3:GetObject",
            "Resource": "arn:aws:s3:::image-resize-488052071209-us-east-1/*"
        }
    ]
}
IAM Roles:
Admin:
AdministratorAccess; trust relationships: "edgelambda.amazonaws.com", "lambda.amazonaws.com"
ImageFunctionsAndRole-EdgeLambdaRole-1U93T440VWXKT:
AmazonS3FullAccess, CloudFrontFullAccess, AWSLambdaExecute, CloudFrontReadOnlyAccess, AWSLambdaBasicExecutionRole
Viewer-Request Function:
'use strict';

const querystring = require('querystring');

// defines the allowed dimensions, default dimensions and how much variance from allowed
// dimension is allowed.
const variables = {
    allowedDimension: [ {w:100,h:100}, {w:200,h:200}, {w:300,h:300}, {w:400,h:400} ],
    defaultDimension: {w:200,h:200},
    variance: 20,
    webpExtension: 'webp'
};

exports.handler = (event, context, callback) => {
    const request = event.Records[0].cf.request;
    const headers = request.headers;

    // parse the querystring key-value pairs. In our case it would be d=100x100
    const params = querystring.parse(request.querystring);

    // fetch the uri of original image
    let fwdUri = request.uri;

    // if there is no dimension attribute, just pass the request
    if (!params.d) {
        callback(null, request);
        return;
    }

    // read the dimension parameter value = width x height and split it by 'x'
    const dimensionMatch = params.d.split("x");

    // set the width and height parameters
    let width = dimensionMatch[0];
    let height = dimensionMatch[1];

    // parse the prefix, image name and extension from the uri.
    // In our case /images/image.jpg
    const match = fwdUri.match(/(.*)\/(.*)\.(.*)/);
    let prefix = match[1];
    let imageName = match[2];
    let extension = match[3];

    // define variable to be set to true if requested dimension is allowed.
    let matchFound = false;

    // calculate the acceptable variance. If image dimension is 105 and is within acceptable
    // range, then in our case, the dimension would be corrected to 100.
    let variancePercent = (variables.variance / 100);

    for (let dimension of variables.allowedDimension) {
        let minWidth = dimension.w - (dimension.w * variancePercent);
        let maxWidth = dimension.w + (dimension.w * variancePercent);
        if (width >= minWidth && width <= maxWidth) {
            width = dimension.w;
            height = dimension.h;
            matchFound = true;
            break;
        }
    }

    // if no match is found from allowed dimensions with variance then set to default
    // dimensions.
    if (!matchFound) {
        width = variables.defaultDimension.w;
        height = variables.defaultDimension.h;
    }

    // read the accept header to determine if webP is supported.
    let accept = headers['accept'] ? headers['accept'][0].value : "";

    let url = [];
    // build the new uri to be forwarded upstream
    url.push(prefix);
    url.push(width + "x" + height);

    // check support for webp
    if (accept.includes(variables.webpExtension)) {
        url.push(variables.webpExtension);
    }
    else {
        url.push(extension);
    }
    url.push(imageName + "." + extension);

    fwdUri = url.join("/");

    // final modified url is of format /images/200x200/webp/image.jpg
    request.uri = fwdUri;
    callback(null, request);
};
Origin-Response Function:
'use strict';

const http = require('http');
const https = require('https');
const querystring = require('querystring');

const AWS = require('aws-sdk');
const S3 = new AWS.S3({
    signatureVersion: 'v4',
});
const Sharp = require('sharp');

// set the S3 and API GW endpoints
const BUCKET = 'image-resize-${AWS::AccountId}-us-east-1';

exports.handler = (event, context, callback) => {
    let response = event.Records[0].cf.response;
    console.log("Response status code :%s", response.status);

    // check if image is not present
    if (response.status == 404) {
        let request = event.Records[0].cf.request;
        let params = querystring.parse(request.querystring);

        // if there is no dimension attribute, just pass the response
        if (!params.d) {
            callback(null, response);
            return;
        }

        // read the dimension parameter value = width x height and split it by 'x'
        let dimensionMatch = params.d.split("x");

        // read the required path. Ex: uri /images/100x100/webp/image.jpg
        let path = request.uri;

        // read the S3 key from the path variable.
        // Ex: path variable /images/100x100/webp/image.jpg
        let key = path.substring(1);

        // parse the prefix, width, height and image name
        // Ex: key=images/200x200/webp/image.jpg
        let prefix, originalKey, match, width, height, requiredFormat, imageName;
        let startIndex;

        try {
            match = key.match(/(.*)\/(\d+)x(\d+)\/(.*)\/(.*)/);
            prefix = match[1];
            width = parseInt(match[2], 10);
            height = parseInt(match[3], 10);
            // correction for jpg required for 'Sharp'
            requiredFormat = match[4] == "jpg" ? "jpeg" : match[4];
            imageName = match[5];
            originalKey = prefix + "/" + imageName;
        }
        catch (err) {
            // no prefix exists for image..
            console.log("no prefix present..");
            match = key.match(/(\d+)x(\d+)\/(.*)\/(.*)/);
            width = parseInt(match[1], 10);
            height = parseInt(match[2], 10);
            // correction for jpg required for 'Sharp'
            requiredFormat = match[3] == "jpg" ? "jpeg" : match[3];
            imageName = match[4];
            originalKey = imageName;
        }

        // get the source image file
        S3.getObject({ Bucket: BUCKET, Key: originalKey }).promise()
            // perform the resize operation
            .then(data => Sharp(data.Body)
                .resize(width, height)
                .toFormat(requiredFormat)
                .toBuffer()
            )
            .then(buffer => {
                // save the resized object to S3 bucket with appropriate object key.
                S3.putObject({
                    Body: buffer,
                    Bucket: BUCKET,
                    ContentType: 'image/' + requiredFormat,
                    CacheControl: 'max-age=31536000',
                    Key: key,
                    StorageClass: 'STANDARD'
                }).promise()
                    // even if there is an exception in saving the object we send the generated
                    // image back to the viewer below
                    .catch(() => { console.log("Exception while writing resized image to bucket"); });

                // generate a binary response with resized image
                response.status = 200;
                response.body = buffer.toString('base64');
                response.bodyEncoding = 'base64';
                response.headers['content-type'] = [{ key: 'Content-Type', value: 'image/' + requiredFormat }];
                callback(null, response);
            })
            .catch(err => {
                console.log("Exception while reading source image :%j", err);
            });
    } // end of if block checking response statusCode
    else {
        // allow the response to pass through
        callback(null, response);
    }
};
Answer 1:
I followed the same blog post and hit the same issues, which I've been battling for several hours. I now have a working solution so thought I'd share my setup.
I didn't use CloudFormation, and instead created the resources manually.
The first change required from the article is in the origin-response/index.js script. S3 returns a 403 status rather than a 404 for non-existent objects (unless the requester also has s3:ListBucket permission on the bucket), so the line which checks if (response.status == 404) needs changing to the below:
if (response.status == 404 || response.status == 403) {
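For readability you could also pull that check into a small helper; this is only a sketch of my own, the helper name is not part of the original blog code:

    // hypothetical helper: decide whether origin-response should generate the
    // resized image. S3 reports a missing object as 403 (AccessDenied) when the
    // caller cannot list the bucket, and as 404 when it can.
    function isMissingObject(response) {
        return response.status == 404 || response.status == 403;
    }

    // then inside the handler:
    // if (isMissingObject(response)) { ... }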
The next change I made was to the AWSLambdaBasicExecutionRole policy. Because the Lambda@Edge function can run in multiple regions, it writes logs to CloudWatch in multiple regions, so I changed the Resource ARN to wildcard the region. Below is the policy JSON:
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": [
                "logs:CreateLogGroup",
                "logs:CreateLogStream",
                "logs:PutLogEvents"
            ],
            "Resource": [
                "arn:aws:logs:*:*:*"
            ]
        }
    ]
}
Next up I made sure that the Bucket Policy allowed access to the Lambda role and CloudFront:
{
    "Version": "2008-10-17",
    "Id": "PolicyForCloudFrontPrivateContent",
    "Statement": [
        {
            "Sid": "1",
            "Effect": "Allow",
            "Principal": {
                "AWS": "arn:aws:iam::cloudfront:user/CloudFront Origin Access Identity XXXXXXXXXXXXX"
            },
            "Action": "s3:GetObject",
            "Resource": "arn:aws:s3:::BUCKET_NAME/*"
        },
        {
            "Sid": "2",
            "Effect": "Allow",
            "Principal": {
                "AWS": "arn:aws:iam::XXXXXXXXXXXXX:role/service-role/image-resize-origin-response-role-XXXXXXXXX"
            },
            "Action": [
                "s3:PutObject",
                "s3:GetObject"
            ],
            "Resource": "arn:aws:s3:::BUCKET_NAME/*"
        }
    ]
}
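If you prefer to apply the bucket policy from code rather than the console, something along these lines should work with the same aws-sdk the functions use (sketch only; BUCKET_NAME and the policy document are placeholders to fill in with the values above):

    // sketch: apply the bucket policy shown above with the aws-sdk.
    const AWS = require('aws-sdk');
    const s3 = new AWS.S3();

    const policy = { /* the bucket policy document shown above */ };

    s3.putBucketPolicy({
        Bucket: 'BUCKET_NAME',
        Policy: JSON.stringify(policy)
    }).promise()
        .then(() => console.log('Bucket policy applied'))
        .catch(err => console.error('Failed to apply bucket policy', err));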
The last piece of the puzzle was creating a policy for the same Lambda role to work with the S3 Bucket:
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "1",
            "Effect": "Allow",
            "Action": [
                "s3:PutObject",
                "s3:GetObject"
            ],
            "Resource": "arn:aws:s3:::BUCKET_NAME/*"
        }
    ]
}
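The same statement can be attached to the Lambda role as an inline policy from code; a minimal aws-sdk sketch, where the role name follows the placeholder pattern above and the policy name is made up for illustration:

    // sketch: attach the S3 access statement above to the Lambda@Edge role
    // as an inline policy. Role name and policy name are placeholders.
    const AWS = require('aws-sdk');
    const iam = new AWS.IAM();

    const policyDocument = {
        Version: '2012-10-17',
        Statement: [{
            Sid: '1',
            Effect: 'Allow',
            Action: ['s3:PutObject', 's3:GetObject'],
            Resource: 'arn:aws:s3:::BUCKET_NAME/*'
        }]
    };

    iam.putRolePolicy({
        RoleName: 'image-resize-origin-response-role-XXXXXXXXX',
        PolicyName: 'image-resize-s3-access',
        PolicyDocument: JSON.stringify(policyDocument)
    }).promise()
        .then(() => console.log('Inline policy attached'))
        .catch(err => console.error('Failed to attach policy', err));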
Source: https://stackoverflow.com/questions/55705775/how-to-fix-503-error-with-resize-image-lambda-edge-functions-on-cloudfront