Video gravity in CALayer with AVAssetExportSession

My application records a video and then adds some effects, which it exports using AVAssetExportSession.

First, there was a gravity problem with the video preview during recording, which I solved by setting the videoGravity property of the AVCaptureVideoPreviewLayer to AVLayerVideoGravityResizeAspectFill.
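
Roughly like this (a simplified sketch; the session setup here is illustrative, not my exact code):

    import AVFoundation
    import UIKit

    // Simplified sketch of the recording-side fix; `captureSession`
    // stands in for my real capture setup.
    let captureSession = AVCaptureSession()
    let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill // crop the preview to fill
    previewLayer.frame = parentView.bounds
    parentView.layer.addSublayer(previewLayer)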

Second, there was the same problem when showing the recorded video, which I solved by setting the videoGravity property of the AVPlayerLayer to AVLayerVideoGravityResizeAspectFill as well.
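
The playback side looks roughly like this (again a simplified sketch; the player setup is illustrative):

    // Simplified sketch of the playback-side fix.
    let player = AVPlayer(url: videoUrl)
    let playerLayer = AVPlayerLayer(player: player)
    playerLayer.videoGravity = AVLayerVideoGravityResizeAspectFill // match the recording preview
    playerLayer.frame = parentView.bounds
    parentView.layer.addSublayer(playerLayer)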

But the problem comes back when I export the video after adding some effects with AVAssetExportSession: the video gravity is wrong again. Even changing the contentsGravity property of the CALayer does not affect the output. I should mention that the problem is especially obvious on iPad.

Here is a screenshot of the video shown in the app, before adding effects:

As you can see, my fingertip is at the top of the screen (because I fixed the gravity issue in the layer inside the app).

But after exporting and saving to the gallery, this is what I see:

I know the problem is with gravity, but I don't know how to fix it: I don't know what changes to make to the video when I record it, or what to change in the code below when I export it:

    // Build a composition from the recorded asset
    let composition = AVMutableComposition()
    let asset = AVURLAsset(url: videoUrl, options: nil)

    let tracks = asset.tracks(withMediaType: AVMediaTypeVideo)
    let videoTrack: AVAssetTrack = tracks[0]
    let timerange = CMTimeRangeMake(kCMTimeZero, asset.duration)

    let viewSize = parentView.bounds.size
    let trackSize = videoTrack.naturalSize

    let compositionVideoTrack: AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: CMPersistentTrackID())

    do {
        try compositionVideoTrack.insertTimeRange(timerange, of: videoTrack, at: kCMTimeZero)
    } catch {
        print(error)
    }

    let compositionAudioTrack: AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: CMPersistentTrackID())

    for audioTrack in asset.tracks(withMediaType: AVMediaTypeAudio) {
        do {
            try compositionAudioTrack.insertTimeRange(audioTrack.timeRange, of: audioTrack, at: kCMTimeZero)
        } catch {
            print(error)
        }
    }

    // Layers for the AVVideoCompositionCoreAnimationTool (changing
    // contentsGravity here does not seem to affect the exported output)
    let videolayer = CALayer()
    videolayer.frame.size = viewSize
    videolayer.contentsGravity = kCAGravityResizeAspectFill

    let parentlayer = CALayer()
    parentlayer.frame.size = viewSize
    parentlayer.contentsGravity = kCAGravityResizeAspectFill

    parentlayer.addSublayer(videolayer)

    let layercomposition = AVMutableVideoComposition()
    layercomposition.frameDuration = CMTimeMake(1, 30)
    layercomposition.renderSize = viewSize
    layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: parentlayer)

    let instruction = AVMutableVideoCompositionInstruction()

    instruction.timeRange = CMTimeRangeMake(kCMTimeZero, asset.duration)

    let videotrack = composition.tracks(withMediaType: AVMediaTypeVideo)[0] as AVAssetTrack
    let layerinstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videotrack)

    // Scale the track to the view size, accounting for the recorded orientation
    let trackTransform = videoTrack.preferredTransform
    let xScale = viewSize.height / trackSize.width
    let yScale = viewSize.width / trackSize.height

    var exportTransform: CGAffineTransform!
    if (getVideoOrientation(transform: videoTrack.preferredTransform).1 == .up) {
        exportTransform = videoTrack.preferredTransform.translatedBy(x: trackTransform.ty * -1 , y: 0).scaledBy(x: xScale , y: yScale)
    } else {
        exportTransform = CGAffineTransform.init(translationX: viewSize.width, y: 0).rotated(by: .pi/2).scaledBy(x: xScale, y: yScale)
    }

    layerinstruction.setTransform(exportTransform, at: kCMTimeZero)

    instruction.layerInstructions = [layerinstruction]
    layercomposition.instructions = [instruction]

    let filePath = FileHelper.getVideoTimeStampName()
    let exportedUrl = URL(fileURLWithPath: filePath)

    guard let assetExport = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality) else {
        delegate?.exportFinished(status: .failed, outputUrl: exportedUrl)
        return
    }

    assetExport.videoComposition = layercomposition
    assetExport.outputFileType = AVFileTypeMPEG4
    assetExport.outputURL = exportedUrl
    assetExport.exportAsynchronously(completionHandler: {
        switch assetExport.status {
        case .completed:
            print("video exported successfully")
            self.delegate?.exportFinished(status: .completed, outputUrl: exportedUrl)
        case .failed:
            self.delegate?.exportFinished(status: .failed, outputUrl: exportedUrl)
            print("exporting video failed: \(String(describing: assetExport.error))")
        default:
            print("the video export status is \(assetExport.status)")
            self.delegate?.exportFinished(status: assetExport.status, outputUrl: exportedUrl)
        }
    })


I would appreciate it if anyone could help.

1 Answer


If you use AVLayerVideoGravityResizeAspectFill, only the on-screen CALayer is cropped to fill its bounds; the camera still captures the full frame. So what actually happens is that the camera records the second image you provided, and the preview simply shows an aspect-filled crop of it. You can solve this problem in the following way:

  • Get the frame as a UIImage
  • Crop the image to the same size as the CALayer you are using
  • Upload the cropped image to the server, show it to the user, etc.


To crop an image, you can use this:

    extension UIImage {
        func crop(to: CGSize) -> UIImage {
            guard let cgimage = self.cgImage else { return self }

            let contextImage: UIImage = UIImage(cgImage: cgimage)
            let contextSize: CGSize = contextImage.size

            // Compute a centered crop rect with the target aspect ratio
            var posX: CGFloat = 0.0
            var posY: CGFloat = 0.0
            let cropAspect: CGFloat = to.width / to.height

            var cropWidth: CGFloat = to.width
            var cropHeight: CGFloat = to.height

            if to.width > to.height { // Landscape
                cropWidth = contextSize.width
                cropHeight = contextSize.width / cropAspect
                posY = (contextSize.height - cropHeight) / 2
            } else if to.width < to.height { // Portrait
                cropHeight = contextSize.height
                cropWidth = contextSize.height * cropAspect
                posX = (contextSize.width - cropWidth) / 2
            } else { // Square target
                if contextSize.width >= contextSize.height { // Square from landscape (or square)
                    cropHeight = contextSize.height
                    cropWidth = contextSize.height * cropAspect
                    posX = (contextSize.width - cropWidth) / 2
                } else { // Square from portrait
                    cropWidth = contextSize.width
                    cropHeight = contextSize.width / cropAspect
                    posY = (contextSize.height - cropHeight) / 2
                }
            }

            let rect: CGRect = CGRect(x: posX, y: posY, width: cropWidth, height: cropHeight)

            // Create a bitmap image from the source using the crop rect
            let imageRef: CGImage = contextImage.cgImage!.cropping(to: rect)!

            // Create a new image from imageRef, preserving the original orientation
            let cropped: UIImage = UIImage(cgImage: imageRef, scale: self.scale, orientation: self.imageOrientation)

            // Redraw the crop at the requested size
            UIGraphicsBeginImageContextWithOptions(to, true, self.scale)
            cropped.draw(in: CGRect(x: 0, y: 0, width: to.width, height: to.height))
            let resized = UIGraphicsGetImageFromCurrentImageContext()
            UIGraphicsEndImageContext()

            return resized!
        }
    }
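
For example, you could grab a frame from the exported video and crop it to the layer's size (a sketch; how you obtain the UIImage depends on your pipeline, and AVAssetImageGenerator is just one option — `layerSize` here is illustrative):

    // Hypothetical usage: pull one frame out of the exported file and crop it
    // to the size of the CALayer used for the on-screen preview.
    let generator = AVAssetImageGenerator(asset: AVURLAsset(url: exportedUrl))
    generator.appliesPreferredTrackTransform = true
    if let cgFrame = try? generator.copyCGImage(at: kCMTimeZero, actualTime: nil) {
        let frame = UIImage(cgImage: cgFrame)
        let cropped = frame.crop(to: layerSize) // `layerSize` = your preview layer's size
        // `cropped` now matches the aspect-filled framing seen in the app
    }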
