//
//  Copyright © 2015 Anson. All rights reserved.
//

#import "UIImage+LeChange.h"
#import <Accelerate/Accelerate.h> // vImage box convolution / matrix multiply used by the blur effect (in case the header does not already import it)

// Angle conversion helpers shared by the rotation methods below.
CGFloat DegreesToRadians(CGFloat degrees) { return degrees * M_PI / 180; }

CGFloat RadiansToDegrees(CGFloat radians) { return radians * 180 / M_PI; }

@implementation UIImage (LeChange)

- (CGFloat)width {
    return self.size.width;
}

- (CGFloat)height {
    return self.size.height;
}

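// Blurs the receiver with a Gaussian-style blur approximated by three vImage box blurs,
// optionally adjusts saturation (1.0 leaves it unchanged), overlays an optional tint color,
// and restricts the blurred drawing to an optional mask. This closely follows Apple's
// UIImageEffects sample code; blurRadius is in points and is multiplied by the screen scale.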
- (UIImage *)applyBlurWithRadius:(CGFloat)blurRadius
                       tintColor:(UIColor *)tintColor
           saturationDeltaFactor:(CGFloat)saturationDeltaFactor
                       maskImage:(UIImage *)maskImage
{
    // Check pre-conditions.
    if (self.size.width < 1 || self.size.height < 1) {
        NSLog(@"*** error: invalid size: (%.2f x %.2f). Both dimensions must be >= 1: %@", self.size.width, self.size.height, self);
        return nil;
    }
    if (!self.CGImage) {
        NSLog(@"*** error: image must be backed by a CGImage: %@", self);
        return nil;
    }
    if (maskImage && !maskImage.CGImage) {
        NSLog(@"*** error: maskImage must be backed by a CGImage: %@", maskImage);
        return nil;
    }

    CGRect imageRect = { CGPointZero, self.size };
    UIImage *effectImage = self;

    BOOL hasBlur = blurRadius > __FLT_EPSILON__;
    BOOL hasSaturationChange = fabs(saturationDeltaFactor - 1.) > __FLT_EPSILON__;
    if (hasBlur || hasSaturationChange) {
        UIGraphicsBeginImageContextWithOptions(self.size, NO, [[UIScreen mainScreen] scale]);
        CGContextRef effectInContext = UIGraphicsGetCurrentContext();
        CGContextScaleCTM(effectInContext, 1.0, -1.0);
        CGContextTranslateCTM(effectInContext, 0, -self.size.height);
        CGContextDrawImage(effectInContext, imageRect, self.CGImage);

        vImage_Buffer effectInBuffer;
        effectInBuffer.data     = CGBitmapContextGetData(effectInContext);
        effectInBuffer.width    = CGBitmapContextGetWidth(effectInContext);
        effectInBuffer.height   = CGBitmapContextGetHeight(effectInContext);
        effectInBuffer.rowBytes = CGBitmapContextGetBytesPerRow(effectInContext);

        UIGraphicsBeginImageContextWithOptions(self.size, NO, [[UIScreen mainScreen] scale]);
        CGContextRef effectOutContext = UIGraphicsGetCurrentContext();
        vImage_Buffer effectOutBuffer;
        effectOutBuffer.data     = CGBitmapContextGetData(effectOutContext);
        effectOutBuffer.width    = CGBitmapContextGetWidth(effectOutContext);
        effectOutBuffer.height   = CGBitmapContextGetHeight(effectOutContext);
        effectOutBuffer.rowBytes = CGBitmapContextGetBytesPerRow(effectOutContext);

        if (hasBlur) {
            // A description of how to compute the box kernel width from the Gaussian
            // radius (aka standard deviation) appears in the SVG spec:
            // http://www.w3.org/TR/SVG/filters.html#feGaussianBlurElement
            //
            // For larger values of 's' (s >= 2.0), an approximation can be used: Three
            // successive box-blurs build a piece-wise quadratic convolution kernel, which
            // approximates the Gaussian kernel to within roughly 3%.
            //
            //     let d = floor(s * 3*sqrt(2*pi)/4 + 0.5)
            //
            // ... if d is odd, use three box-blurs of size 'd', centered on the output pixel.
            //
            CGFloat inputRadius = blurRadius * [[UIScreen mainScreen] scale];
            NSUInteger radius = floor(inputRadius * 3. * sqrt(2 * M_PI) / 4 + 0.5);
            if (radius % 2 != 1) {
                radius += 1; // force radius to be odd so that the three box-blur methodology works.
            }
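            // Worked example (assuming a 2x screen): blurRadius 8 pt gives inputRadius 16,
            // so radius = floor(16 * 3*sqrt(2*pi)/4 + 0.5) = 30, bumped to 31 to stay odd.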
            vImageBoxConvolve_ARGB8888(&effectInBuffer, &effectOutBuffer, NULL, 0, 0, (uint32_t)radius, (uint32_t)radius, 0, kvImageEdgeExtend);
            vImageBoxConvolve_ARGB8888(&effectOutBuffer, &effectInBuffer, NULL, 0, 0, (uint32_t)radius, (uint32_t)radius, 0, kvImageEdgeExtend);
            vImageBoxConvolve_ARGB8888(&effectInBuffer, &effectOutBuffer, NULL, 0, 0, (uint32_t)radius, (uint32_t)radius, 0, kvImageEdgeExtend);
        }
        BOOL effectImageBuffersAreSwapped = NO;
        if (hasSaturationChange) {
            CGFloat s = saturationDeltaFactor;
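            // Saturation matrix built from the Rec. 709 luma coefficients (0.2126, 0.7152, 0.0722);
            // s = 1 leaves colors essentially unchanged, s = 0 desaturates to grayscale.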
            CGFloat floatingPointSaturationMatrix[] = {
                0.0722 + 0.9278 * s,  0.0722 - 0.0722 * s,  0.0722 - 0.0722 * s,  0,
                0.7152 - 0.7152 * s,  0.7152 + 0.2848 * s,  0.7152 - 0.7152 * s,  0,
                0.2126 - 0.2126 * s,  0.2126 - 0.2126 * s,  0.2126 + 0.7873 * s,  0,
                                  0,                    0,                    0,  1,
            };
            const int32_t divisor = 256;
            NSUInteger matrixSize = sizeof(floatingPointSaturationMatrix) / sizeof(floatingPointSaturationMatrix[0]);
            int16_t saturationMatrix[matrixSize];
            for (NSUInteger i = 0; i < matrixSize; ++i) {
                saturationMatrix[i] = (int16_t)roundf(floatingPointSaturationMatrix[i] * divisor);
            }
            if (hasBlur) {
                vImageMatrixMultiply_ARGB8888(&effectOutBuffer, &effectInBuffer, saturationMatrix, divisor, NULL, NULL, kvImageNoFlags);
                effectImageBuffersAreSwapped = YES;
            } else {
                vImageMatrixMultiply_ARGB8888(&effectInBuffer, &effectOutBuffer, saturationMatrix, divisor, NULL, NULL, kvImageNoFlags);
            }
        }
        if (!effectImageBuffersAreSwapped)
            effectImage = UIGraphicsGetImageFromCurrentImageContext();
        UIGraphicsEndImageContext();

        if (effectImageBuffersAreSwapped)
            effectImage = UIGraphicsGetImageFromCurrentImageContext();
        UIGraphicsEndImageContext();
    }

    // Set up output context.
    UIGraphicsBeginImageContextWithOptions(self.size, NO, [[UIScreen mainScreen] scale]);
    CGContextRef outputContext = UIGraphicsGetCurrentContext();
    CGContextScaleCTM(outputContext, 1.0, -1.0);
    CGContextTranslateCTM(outputContext, 0, -self.size.height);

    // Draw base image.
    CGContextDrawImage(outputContext, imageRect, self.CGImage);

    // Draw effect image.
    if (hasBlur) {
        CGContextSaveGState(outputContext);
        if (maskImage) {
            CGContextClipToMask(outputContext, imageRect, maskImage.CGImage);
        }
        CGContextDrawImage(outputContext, imageRect, effectImage.CGImage);
        CGContextRestoreGState(outputContext);
    }

    // Add in color tint.
    if (tintColor) {
        CGContextSaveGState(outputContext);
        CGContextSetFillColorWithColor(outputContext, tintColor.CGColor);
        CGContextFillRect(outputContext, imageRect);
        CGContextRestoreGState(outputContext);
    }

    // Output image is ready.
    UIImage *outputImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();

    return outputImage;
}

- (UIImage *)lc_imageWithBlurLevel:(CGFloat)blur
{
    UIColor *tintColor = [[UIColor blackColor] colorWithAlphaComponent:0.2];
    return [self applyBlurWithRadius:blur tintColor:tintColor saturationDeltaFactor:1.8 maskImage:nil];
}
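
// Example usage (hypothetical caller and image view):
//   UIImage *blurred = [photo lc_imageWithBlurLevel:8.0];
//   backgroundImageView.image = blurred;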

- (BOOL)lc_writeToFileAtPath:(NSString *)aPath
{
    if (aPath.length == 0)
        return NO;

    @try {
        NSData *imageData = nil;
        NSString *ext = [aPath pathExtension];
        if ([ext isEqualToString:@"png"]) {
            imageData = UIImagePNGRepresentation(self);
        }
        else {
            // Everything else is written as JPEG.
            // compressionQuality: 1.0 = best quality / least compression, 0.0 = maximum compression.
            imageData = UIImageJPEGRepresentation(self, 1);
        }

        if (imageData.length == 0)
            return NO;

        return [imageData writeToFile:aPath atomically:YES];
    }
    @catch (NSException *e) {
        NSLog(@"lc_writeToFileAtPath: exception while writing image: %@", e);
    }

    return NO;
}
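
// Example usage (hypothetical file name under the temporary directory):
//   NSString *path = [NSTemporaryDirectory() stringByAppendingPathComponent:@"thumbnail.jpg"];
//   BOOL saved = [image lc_writeToFileAtPath:path];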

+ (UIImage *)lc_imageWithScreenContents
{
    // Snapshot the key window at 1x; pass 0.0 instead of 1.0 to capture at the screen's native scale.
    UIGraphicsBeginImageContextWithOptions([UIScreen mainScreen].bounds.size, YES, 1.0);
    [[UIApplication sharedApplication].keyWindow.layer renderInContext:UIGraphicsGetCurrentContext()];
    UIImage *uiImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return uiImage;
}

+ (UIImage *)lc_createImageWithColor:(UIColor *)color
{
    // Render a 1x1 pt image filled with the given color.
    CGRect rect = CGRectMake(0.0f, 0.0f, 1.0f, 1.0f);
    UIGraphicsBeginImageContext(rect.size);
    CGContextRef context = UIGraphicsGetCurrentContext();
    CGContextSetFillColorWithColor(context, [color CGColor]);
    CGContextFillRect(context, rect);
    UIImage *theImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return theImage;
}

- (UIImage *)lc_imageAtRect:(CGRect)rect
{
    // Note: the rect is interpreted in the pixel coordinates of the underlying CGImage,
    // and the returned image has scale 1.0 and orientation up.
    CGImageRef imageRef = CGImageCreateWithImageInRect([self CGImage], rect);
    UIImage *subImage = [UIImage imageWithCGImage:imageRef];
    CGImageRelease(imageRef);

    return subImage;
}

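// Scales the image proportionally so that it completely fills targetSize (aspect fill);
// the scaled image is centered, and anything outside targetSize is cropped by the context.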
- (UIImage *)lc_imageByScalingProportionallyToMinimumSize:(CGSize)targetSize
{
    UIImage *sourceImage = self;
    UIImage *newImage = nil;

    CGSize imageSize = sourceImage.size;
    CGFloat width = imageSize.width;
    CGFloat height = imageSize.height;

    CGFloat targetWidth = targetSize.width;
    CGFloat targetHeight = targetSize.height;

    CGFloat scaleFactor = 0.0;
    CGFloat scaledWidth = targetWidth;
    CGFloat scaledHeight = targetHeight;

    CGPoint thumbnailPoint = CGPointMake(0.0, 0.0);

    if (CGSizeEqualToSize(imageSize, targetSize) == NO) {

        CGFloat widthFactor = targetWidth / width;
        CGFloat heightFactor = targetHeight / height;

        // Use the larger factor so the image fills the target size.
        if (widthFactor > heightFactor)
            scaleFactor = widthFactor;
        else
            scaleFactor = heightFactor;

        scaledWidth = width * scaleFactor;
        scaledHeight = height * scaleFactor;

        // Center the image.
        if (widthFactor > heightFactor) {
            thumbnailPoint.y = (targetHeight - scaledHeight) * 0.5;
        } else if (widthFactor < heightFactor) {
            thumbnailPoint.x = (targetWidth - scaledWidth) * 0.5;
        }
    }

    // Draw the scaled image into a context of the target size and capture the result.
    UIGraphicsBeginImageContext(targetSize);

    CGRect thumbnailRect = CGRectZero;
    thumbnailRect.origin = thumbnailPoint;
    thumbnailRect.size.width = scaledWidth;
    thumbnailRect.size.height = scaledHeight;

    [sourceImage drawInRect:thumbnailRect];

    newImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();

    if (newImage == nil) NSLog(@"could not scale image");

    return newImage;
}

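// Scales the image proportionally so that it fits entirely inside targetSize (aspect fit);
// the scaled image is centered, leaving empty (transparent) margins along the shorter dimension.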
- (UIImage *)lc_imageByScalingProportionallyToSize:(CGSize)targetSize
{
    UIImage *sourceImage = self;
    UIImage *newImage = nil;

    CGSize imageSize = sourceImage.size;
    CGFloat width = imageSize.width;
    CGFloat height = imageSize.height;

    CGFloat targetWidth = targetSize.width;
    CGFloat targetHeight = targetSize.height;

    CGFloat scaleFactor = 0.0;
    CGFloat scaledWidth = targetWidth;
    CGFloat scaledHeight = targetHeight;

    CGPoint thumbnailPoint = CGPointMake(0.0, 0.0);

    if (CGSizeEqualToSize(imageSize, targetSize) == NO) {

        CGFloat widthFactor = targetWidth / width;
        CGFloat heightFactor = targetHeight / height;

        // Use the smaller factor so the whole image fits inside the target size.
        if (widthFactor < heightFactor)
            scaleFactor = widthFactor;
        else
            scaleFactor = heightFactor;

        scaledWidth = width * scaleFactor;
        scaledHeight = height * scaleFactor;

        // Center the image.
        if (widthFactor < heightFactor) {
            thumbnailPoint.y = (targetHeight - scaledHeight) * 0.5;
        } else if (widthFactor > heightFactor) {
            thumbnailPoint.x = (targetWidth - scaledWidth) * 0.5;
        }
    }

    // Draw the scaled image into a context of the target size and capture the result.
    UIGraphicsBeginImageContext(targetSize);

    CGRect thumbnailRect = CGRectZero;
    thumbnailRect.origin = thumbnailPoint;
    thumbnailRect.size.width = scaledWidth;
    thumbnailRect.size.height = scaledHeight;

    [sourceImage drawInRect:thumbnailRect];

    newImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();

    if (newImage == nil) NSLog(@"could not scale image");

    return newImage;
}

// Scales (and possibly distorts) the image to exactly targetSize, ignoring the aspect ratio.
- (UIImage *)lc_imageByScalingToSize:(CGSize)targetSize {

    UIImage *sourceImage = self;
    UIImage *newImage = nil;

    // Draw the source image stretched over the full target rect.
    UIGraphicsBeginImageContext(targetSize);

    CGRect thumbnailRect = CGRectZero;
    thumbnailRect.origin = CGPointMake(0.0, 0.0);
    thumbnailRect.size.width = targetSize.width;
    thumbnailRect.size.height = targetSize.height;

    [sourceImage drawInRect:thumbnailRect];

    newImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();

    if (newImage == nil) NSLog(@"could not scale image");

    return newImage;
}

- (UIImage *)lc_imageRotatedByRadians:(CGFloat)radians
{
    return [self lc_imageRotatedByDegrees:RadiansToDegrees(radians)];
}

- (UIImage *)lc_imageRotatedByDegrees:(CGFloat)degrees
{
    // Calculate the size of the rotated view's containing box for our drawing space.
    UIView *rotatedViewBox = [[UIView alloc] initWithFrame:CGRectMake(0, 0, self.size.width, self.size.height)];
    CGAffineTransform t = CGAffineTransformMakeRotation(DegreesToRadians(degrees));
    rotatedViewBox.transform = t;
    CGSize rotatedSize = rotatedViewBox.frame.size;

    // Create the bitmap context.
    UIGraphicsBeginImageContext(rotatedSize);
    CGContextRef bitmap = UIGraphicsGetCurrentContext();

    // Move the origin to the middle of the image so we rotate and scale around the center.
    CGContextTranslateCTM(bitmap, rotatedSize.width / 2, rotatedSize.height / 2);

    // Rotate the image context.
    CGContextRotateCTM(bitmap, DegreesToRadians(degrees));

    // Now, draw the rotated/scaled image into the context.
    CGContextScaleCTM(bitmap, 1.0, -1.0);
    CGContextDrawImage(bitmap, CGRectMake(-self.size.width / 2, -self.size.height / 2, self.size.width, self.size.height), [self CGImage]);

    UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return newImage;
}

- (UIImage *)lc_keepScaleWithWidth:(float)newWidth
{
    float oldWidth = self.size.width;
    float oldHeight = self.size.height;
    float newHeight = newWidth / oldWidth * oldHeight;
    CGSize newSize = CGSizeMake(newWidth, newHeight);
    return [self lc_imageByScalingToSize:newSize];
}

- (UIImage *)lc_scaleWithOutDamageInTop:(CGFloat)top
                                   left:(CGFloat)left
                                 bottom:(CGFloat)bottom
                                  right:(CGFloat)right
{
    // Return a resizable image whose cap insets protect the edges from stretching.
    UIEdgeInsets insets = UIEdgeInsetsMake(top, left, bottom, right);
    return [self resizableImageWithCapInsets:insets];
}

- (UIImage *)lc_chnageToScale:(float)scaleSize
{
    UIGraphicsBeginImageContext(CGSizeMake(self.size.width * scaleSize, self.size.height * scaleSize));
    [self drawInRect:CGRectMake(0, 0, self.size.width * scaleSize, self.size.height * scaleSize)];
    UIImage *scaledImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return scaledImage;
}

// Crops a centered square from the image. The square's side is at most `size`,
// and is further limited by the image's shorter dimension.
- (UIImage *)lc_centerClipBySize:(CGFloat)size
{
    CGFloat width = self.size.width;
    CGFloat height = self.size.height;
    CGFloat targetSize = 0;
    if (width > height) {
        // Landscape: the square's side is bounded by the image height.
        targetSize = MIN(size, height);
    }
    else {
        // Portrait (or square): the square's side is bounded by the image width.
        targetSize = MIN(size, width);
    }

    CGFloat left = (width - targetSize) / 2.0;
    CGFloat top = (height - targetSize) / 2.0;
    CGRect rect = CGRectMake(left, top, targetSize, targetSize);
    return [self lc_imageAtRect:rect];
}

// Crops the image to the aspect ratio of `size`, then scales it to that size.
- (UIImage *)lc_centerScaleToSize:(CGSize)size {

    UIImage *image = [self lc_cutWithWideRate:size.width / size.height];
    // Scale factor from the cropped width to the requested width.
    float scale = size.width / image.width;
    image = [image lc_chnageToScale:scale];
    return image;
}

// Crops the largest centered rect whose width-to-height ratio equals `rate`.
- (UIImage *)lc_cutWithWideRate:(CGFloat)rate {

    CGFloat x = 0.0;
    CGFloat y = 0.0;
    CGFloat w = 0.0;
    CGFloat h = 0.0;

    CGFloat imageSizeRate = self.width / self.height;
    if (imageSizeRate >= rate) {
        // The image is wider than requested: keep the full height, trim the sides.
        w = self.height * rate;
        h = self.height;
        x = (self.width - w) / 2;
        y = 0;
    } else {
        // The image is taller than requested: keep the full width, trim top and bottom.
        w = self.width;
        h = self.width / rate;
        x = 0;
        y = (self.height - h) / 2;
    }

    return [self lc_imageAtRect:CGRectMake(x, y, w, h)];
}
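
// Reinterprets the bitmap with a different point size by changing only the UIImage scale
// metadata; the underlying pixels are not resampled, and the orientation is reset to up.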
- (UIImage *)lc_imageScalingWithSize:(CGSize)size {

    CGFloat scaleWidth = self.size.width / size.width;
    CGFloat scaleHeight = self.size.height / size.height;
    CGFloat scale = MAX(scaleWidth, scaleHeight);

    return [UIImage imageWithCGImage:self.CGImage scale:scale orientation:UIImageOrientationUp];
}
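
// Recolors the image: its alpha channel is used as a mask and every visible pixel is
// filled with `color` (handy for tinting template-style icons).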
- (UIImage *)lc_imageWithColor:(UIColor *)color
{
    UIGraphicsBeginImageContextWithOptions(self.size, NO, self.scale);
    CGContextRef context = UIGraphicsGetCurrentContext();
    CGContextTranslateCTM(context, 0, self.size.height);
    CGContextScaleCTM(context, 1.0, -1.0);
    CGContextSetBlendMode(context, kCGBlendModeNormal);
    CGRect rect = CGRectMake(0, 0, self.size.width, self.size.height);
    CGContextClipToMask(context, rect, self.CGImage);
    [color setFill];
    CGContextFillRect(context, rect);
    UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return newImage;
}

// Re-encodes the image as JPEG, lowering the quality in 0.01 steps until the data is no
// larger than `size` kilobytes, the quality floor is reached, or the size stops shrinking.
- (NSData *)compressToMaxDataSizeKBytes:(CGFloat)size {
    NSData *data = UIImageJPEGRepresentation(self, 1.0);
    CGFloat dataKBytes = data.length / 1000.0;
    CGFloat maxQuality = 0.9f;
    CGFloat lastDataKBytes = dataKBytes;
    while (dataKBytes > size && maxQuality > 0.01f) {
        maxQuality = maxQuality - 0.01f;
        data = UIImageJPEGRepresentation(self, maxQuality);
        dataKBytes = data.length / 1000.0;
        if (lastDataKBytes == dataKBytes) {
            // Further quality reductions no longer shrink the data; give up.
            break;
        } else {
            lastDataKBytes = dataKBytes;
        }
    }
    return data;
}
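
// Example usage (hypothetical 500 KB upload limit):
//   NSData *payload = [image compressToMaxDataSizeKBytes:500];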

@end