Runt
2025-07-04 acf8e83cbf106b4350536d54eb46379dd86a623c
Input dialog
Icon rework
Camera adjustments
7 files deleted
7 files added
9 files modified
1386 lines changed in 23 files
.gitignore (1 line)
LiveProject/activity/stream/LiveActivity.swift (96 lines)
LiveProject/activity/stream/LiveViewModel.swift (99 lines)
LiveProject/controller/CameraCapture.swift (31 lines)
LiveProject/data/DeviceInfo.swift (2 lines)
LiveProject/data/IconInfo.swift (12 lines)
LiveProject/enum/Icons.swift (24 lines)
LiveProject/shape/IconBack.swift (36 lines)
LiveProject/shape/IconCamera.swift (60 lines)
LiveProject/shape/IconLandscape.swift (27 lines)
LiveProject/shape/IconMic.swift (63 lines)
LiveProject/shape/IconPortrait.swift (28 lines)
LiveProject/shape/IconRect.swift (22 lines)
LiveProject/tool/CameraHelper.swift (12 lines)
LiveProject/tool/H264Encoder.swift (166 lines)
LiveProject/tool/MetalPixelConverter.swift (227 lines)
LiveProject/tool/MetalRenderer.swift (123 lines)
LiveProject/tool/PixelBufferConverter.swift (44 lines)
LiveProject/views/FlowLayout.swift (180 lines)
LiveProject/views/LTextField.swift (73 lines)
LiveProject/views/MButton.swift (16 lines)
LiveProject/views/TitleBarView.swift (8 lines)
LiveProject/views/VideoRendererView.swift (36 lines)
.gitignore
New file
@@ -0,0 +1 @@
/.idea/
LiveProject/activity/stream/LiveActivity.swift
@@ -11,10 +11,9 @@
import MetalKit
struct LiveActivity: View {
    @State private var pixelBuffer: CVPixelBuffer?
    
    @State private var showDeviceDialog = false
    @State private var showInputDialog = false
    
    @State private var streamRate = Float(9/16.0);
    @State private var fpsState = 30;
@@ -22,27 +21,27 @@
    
    @State private var displaySize : CGSize = .zero;
    
    @State private var devices = [DeviceInfo(name: "相机", type: .CAMERA, deviceId: UUID().uuidString,icon: IconCamera()),
                                  DeviceInfo(name: "话筒", type: .MICROPHONE,deviceId: UUID().uuidString,icon: IconMic()),
                                  DeviceInfo(name: "系统", type: .SYSTEM,deviceId : UUID().uuidString,icon: IconPortrait())]
    @State private var devices = [DeviceInfo(name: "相机", type: .CAMERA, deviceId: UUID().uuidString,icon: Icons.CAMERA),
                                  DeviceInfo(name: "话筒", type: .MICROPHONE,deviceId: UUID().uuidString,icon: Icons.MIC),
                                  DeviceInfo(name: "系统", type: .SYSTEM,deviceId : UUID().uuidString,icon: Icons.PORTRAIT)]
    
-    private let mViewModel = LiveViewModel()
    @StateObject private var mViewModel = LiveViewModel()
    
    var body: some View {
        ZStack{
            Color.clear
                .ignoresSafeArea() // fill the entire screen
            VStack{
-                VideoRendererView(renderer:mViewModel.renderer).background(Color.black).frame(width: mainSize.width,height:mainSize.height)
                VideoRendererView(pixelBuffer: $mViewModel.pixelBuffer).background(Color.black).frame(width: mainSize.width,height:mainSize.height)
                Spacer()
            }.border(Color.blue)
            VStack{
                Spacer()
                BottomBtns().frame(alignment: .bottom).border(Color.green)
            }
-            if showDeviceDialog {
-                DialogDevices()
            if showInputDialog{
                DialogInput()
            }
        }.frame(minWidth: 0, maxWidth: .infinity, minHeight: 0, maxHeight: .infinity, alignment: .topLeading)
            .background(
@@ -99,20 +98,59 @@
        print("updateWindow:\(mainSize)")
    }
    
-    func DialogDevices() -> some View{
    func DialogInput(onCancel:() ->Void = {},onConfirm:() -> Void = {}) -> some View{
        ZStack{
            Color.black.opacity(0.4)
                .edgesIgnoringSafeArea(.all)
                .onTapGesture {
                    withAnimation {
-                        showDeviceDialog = false
                        showInputDialog = false
                    }
                }
            VStack {
                VStack(alignment: .leading, spacing: 40) {
                    Text("请输入直播地址")
                        .font(Font.system(size: 20))
                    LTextField().environmentObject(LText())
                    HStack{
                        Spacer()
                        Button(action:{
                            showInputDialog.toggle();
                        }){
                            Text("取消")
                                .font(Font.system(size: 20))
                                .foregroundColor(Color.gray)
                        }
                        Spacer().frame(width: 30)
                        Button(action:{
                            showInputDialog.toggle();
                        }){
                            Text("确认")
                                .font(Font.system(size: 20))
                                .foregroundColor(Color.colorTextLink)
                        }
                    }
                }
                .padding(30)
                .background(Color.white)
                .cornerRadius(20)
                .transition(.move(edge: .bottom))
            }
            .padding(60)
            .zIndex(1)
            .animation(.default, value: devices)
        }
    }
    func DialogDevices() -> some View{
        VStack{
                VStack(spacing: 20) {
-                    Spacer().frame(height:40)
-                    FlowLayout(devices){ device in
                Spacer().frame(height:20)
                FlowLayout(){
                    ForEach(devices, id: \.self) { device in
                        MButton(icon: device.icon,text: device.name){
                            mViewModel.newWindowAction(device: device){ status in
                                withAnimation{
@@ -122,17 +160,12 @@
                            print("\(device.name) click")
                        }
                    }
                }
            }
            .frame(maxWidth: .infinity,alignment:.leading)
-                    .padding()
                }
-                .frame(maxWidth: .infinity)
-                .padding()
-                .background(Color.white)
-                .cornerRadius(20)
-                .transition(.move(edge: .bottom))
            }
-            .zIndex(1)
-            .animation(.default, value: devices)
        }
        .frame(maxHeight: .infinity,alignment:.topLeading)
    }
    
    func BottomBtns() -> some View{
@@ -140,7 +173,7 @@
        
            HStack(){
                //横竖屏控制
-                MButton(icon:streamRate == (9/16.0) ? IconPortrait() : IconLandscape() ){
                MButton(icon:streamRate == (9/16.0) ? Icons.PORTRAIT : Icons.LANDSCAPE ){
                    streamRate = streamRate == (9/16.0) ? (16/9.0) : (9/16.0)
                    updateWindowSize()
                }
@@ -156,18 +189,29 @@
            HStack{
                LButton(text: "设备"){
                    print("Click 设备 button")
-                    withAnimation{
                        showDeviceDialog.toggle()
                }.sheet(isPresented:$showDeviceDialog, content: {
                    VStack {
                        ScrollView {
                            DialogDevices()
                    }
                }
                    .presentationDetents([.height(200),.medium])
                })
                LButton(text: "RTMP"){
                    print("Click RTMP button")
                    withAnimation{
                        showInputDialog.toggle()
                    }
                }
                /*LButton(text: "文件"){
                    
                }*/
                LButton(text: "文本"){
                    print("Click 文本 button")
                    withAnimation{
                        showInputDialog.toggle()
                    }
                }
            }
            HStack{
LiveProject/activity/stream/LiveViewModel.swift
@@ -7,22 +7,54 @@
import UIKit
import AVFoundation
-class LiveViewModel{
class LiveViewModel: ObservableObject {
    @Published var pixelBuffer: CVPixelBuffer?
    let encoder = H264Encoder(width: 1080, height: 1920, fps: 30, bitrate: 1_000_000)
    var frameIndex: Int64 = 0
    let encodeQueue = DispatchQueue(label: "encoder.queue")
    
    lazy var camera = CameraCapture()
    lazy var renderer = MetalRenderer()
    var timestamp = Int(Date().timeIntervalSince1970 * 1000)
    
    func newWindowAction(device:DeviceInfo,completion: @escaping (Bool) -> Void = {b in}){
        switch device.type{
        case StreamType.CAMERA:
            requestCameraPermission(mediaType: .video){ staus in
                if(staus){
-                    self.camera.onFrame = { buffer in
-                        self.renderer.updateFrame(pixelBuffer: buffer)
-                        print("frame updated")
                    var ts1 =  Int(Date().timeIntervalSince1970 * 1000)
                    self.camera.onFrame = {  [weak self]  buffer in
                        guard let self = self else { return }
                        let width = CVPixelBufferGetWidth(buffer)
                        let height = CVPixelBufferGetHeight(buffer)
                        guard width > 0 && height > 0 else {
                            print("Invalid pixel buffer size: \(width)x\(height)")
                            return
                        }
                        self.frameIndex += 1
                        let ts =  Int(Date().timeIntervalSince1970 * 1000)
                        self.timestamp = ts;
                        let cmTime = CMTimeMake(value: Int64(CACurrentMediaTime() * 1000), timescale: 1000);
                        self.encoder.encode(pixelBuffer: buffer, pts: cmTime)
                        DispatchQueue.main.async {
                            self.pixelBuffer = buffer;
                        }
                        //print("frame updated")
                    }
                    DispatchQueue.global(qos: .userInitiated).async {
                        self.camera.start()
                    }
                    print("启动相机")
                    self.encoder.onEncoded = { (data: Data, ctime: CMTime, isKey: Bool) in
                        let timestamp2 = Int(Date().timeIntervalSince1970 * 1000)
                        print("编码时间2 \(timestamp2 - self.timestamp)")
                        print("Encoded NALU size: \(data.count), key frame: \(isKey)")
                    }
                }else{
                    
                }
@@ -33,6 +65,17 @@
            break;
        }
    }
    func closeWindowAction(device:DeviceInfo){
        switch device.type{
        case StreamType.CAMERA:
            print("关闭相机")
            break;
        default:
            break;
        }
    }
    
    
    func requestCameraPermission(mediaType: AVMediaType,completion: @escaping (Bool) -> Void) {
@@ -51,4 +94,50 @@
            completion(false)
        }
    }
    func copyPixelBuffer(_ src: CVPixelBuffer) -> CVPixelBuffer? {
        let width = CVPixelBufferGetWidth(src)
        let height = CVPixelBufferGetHeight(src)
        let pixelFormat = CVPixelBufferGetPixelFormatType(src)
        var dst: CVPixelBuffer?
        let attrs: [String: Any] = [
            kCVPixelBufferIOSurfacePropertiesKey as String: [:]
        ]
        let status = CVPixelBufferCreate(
            kCFAllocatorDefault,
            width,
            height,
            pixelFormat,
            attrs as CFDictionary,
            &dst
        )
        guard status == kCVReturnSuccess, let dstBuffer = dst else {
            print("❌ 复制 PixelBuffer 失败")
            return nil
        }
        CVPixelBufferLockBaseAddress(src, .readOnly)
        CVPixelBufferLockBaseAddress(dstBuffer, [])
        let planeCount = CVPixelBufferGetPlaneCount(src)
        for i in 0..<planeCount {
            let srcAddr = CVPixelBufferGetBaseAddressOfPlane(src, i)
            let dstAddr = CVPixelBufferGetBaseAddressOfPlane(dstBuffer, i)
            let height = CVPixelBufferGetHeightOfPlane(src, i)
            let bytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(src, i)
            memcpy(dstAddr, srcAddr, height * bytesPerRow)
        }
        CVPixelBufferUnlockBaseAddress(src, .readOnly)
        CVPixelBufferUnlockBaseAddress(dstBuffer, [])
        return dstBuffer
    }
}
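One caveat in copyPixelBuffer: CVPixelBufferCreate is free to pick a different bytesPerRow for the destination than the source has, so memcpy(dstAddr, srcAddr, height * bytesPerRow) with the source's stride can skew or overrun rows. A stride-safe variant of the plane loop (a sketch, not part of this commit):

    // Copy row by row, honoring each buffer's own stride.
    for i in 0..<planeCount {
        guard let srcAddr = CVPixelBufferGetBaseAddressOfPlane(src, i),
              let dstAddr = CVPixelBufferGetBaseAddressOfPlane(dstBuffer, i) else { continue }
        let rows = CVPixelBufferGetHeightOfPlane(src, i)
        let srcStride = CVPixelBufferGetBytesPerRowOfPlane(src, i)
        let dstStride = CVPixelBufferGetBytesPerRowOfPlane(dstBuffer, i)
        let rowBytes = min(srcStride, dstStride)
        for row in 0..<rows {
            memcpy(dstAddr + row * dstStride, srcAddr + row * srcStride, rowBytes)
        }
    }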
LiveProject/controller/CameraCapture.swift
@@ -8,6 +8,8 @@
class CameraCapture: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate {
    private let session = AVCaptureSession()
    private var videoOutput: AVCaptureVideoDataOutput?
    private var input: AVCaptureDeviceInput?
    var onFrame: ((CVPixelBuffer) -> Void)?
    func start() {
@@ -17,8 +19,10 @@
            return
        }
        self.input = input
        session.beginConfiguration()
-        session.sessionPreset = .high
        session.sessionPreset = .hd1920x1080
        if session.canAddInput(input) {
            session.addInput(input)
@@ -34,14 +38,39 @@
            session.addOutput(output)
        }
        self.videoOutput = output
        session.commitConfiguration()
        session.startRunning()
        print("📷 相机已开启")
    }
    func captureOutput(_ output: AVCaptureOutput,
                       didOutput sampleBuffer: CMSampleBuffer,
                       from connection: AVCaptureConnection) {
        guard let buffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
        let width = CVPixelBufferGetWidth(buffer)
        let height = CVPixelBufferGetHeight(buffer)
        //print("📷 当前帧尺寸: \(width)x\(height)")
        onFrame?(buffer)
    }
    func stop(){
        session.stopRunning()
        session.beginConfiguration()
        if let input = input {
            session.removeInput(input)
        }
        if let output = videoOutput {
            session.removeOutput(output)
        }
        session.commitConfiguration()
        input = nil
        videoOutput = nil
        print("📷 相机已关闭")
    }
}
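start() ends in session.startRunning(), which blocks until capture is live; LiveViewModel already pushes that call onto a global queue. A dedicated serial queue is the more common shape, since it also serializes stop() and any later reconfiguration against start. A sketch (the queue label and wrapper functions are illustrative):

    // Confine all AVCaptureSession mutations to one serial queue.
    let sessionQueue = DispatchQueue(label: "live.camera.session")

    func startCamera(_ capture: CameraCapture) {
        sessionQueue.async { capture.start() }  // keeps the blocking startRunning() off the main thread
    }

    func stopCamera(_ capture: CameraCapture) {
        sessionQueue.async { capture.stop() }
    }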
LiveProject/data/DeviceInfo.swift
@@ -10,7 +10,7 @@
    let name:String
    let type:StreamType
    let deviceId:String;
-    var icon : (any View)? = nil;
    var icon : IconInfo? = nil;
    
    func hash(into hasher: inout Hasher){
        hasher.combine(deviceId)
LiveProject/data/IconInfo.swift
New file
@@ -0,0 +1,12 @@
//
//  IconInfo.swift
//  LiveProject
//
//  Created by 倪路朋 on 7/4/25.
//
import SwiftUI
struct IconInfo{
    var name:String
    var size:CGSize = CGSize(width: 20, height: 20)
}
LiveProject/enum/Icons.swift
New file
@@ -0,0 +1,24 @@
//
//  Icons.swift
//  LiveProject
//
//  Created by 倪路朋 on 7/4/25.
//
import SwiftUI
struct Icons{
    static let CAMERA = IconInfo(name: "camera",size: CGSize(width: 25, height: 20))
    static let MIC = IconInfo(name: "mic",size: CGSize(width: 15, height: 23))
    static let MIC_MUTE = IconInfo(name: "mic.slash",size: CGSize(width: 20, height: 23))
    static let PORTRAIT = IconInfo(name: "ipad",size: CGSize(width: 18, height: 23))
    static let LANDSCAPE = IconInfo(name: "ipad.landscape",size: CGSize(width: 25, height: 20))
    static let BACK = IconInfo(name: "arrow.left",size: CGSize(width: 25, height: 20))
    static let SPEAKER = IconInfo(name: "speaker",size: CGSize(width: 18, height: 23))
    static let SPEAKER_MUTE = IconInfo(name: "speaker.slash",size: CGSize(width: 18, height: 23))
    static let IMAGE = IconInfo(name: "photo",size: CGSize(width: 25, height: 23))
    static let IMAGE_MUTE = IconInfo(name: "photo.slash",size: CGSize(width: 25, height: 23))
    static let ROTATE_LEFT = IconInfo(name: "rotate.left",size: CGSize(width: 25, height: 25))
    static let ROTATE_RIGHT = IconInfo(name: "rotate.right",size: CGSize(width: 25, height: 25))
    static let INFO = IconInfo(name: "info.circle",size: CGSize(width: 25, height: 25))
    static let PAINT = IconInfo(name: "paintpalette",size: CGSize(width: 25, height: 25))
}
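IconInfo plus the Icons table replaces the seven hand-drawn Shape files with SF Symbol names and a preferred point size. Rendering one takes a couple of modifiers; a sketch of the pattern MButton and TitleBarView now use (the helper name is illustrative):

    import SwiftUI

    // Sketch: turn an IconInfo into a view the way the reworked buttons do.
    func iconView(_ info: IconInfo, tint: Color = .white) -> some View {
        Image(systemName: info.name)
            .resizable()
            .aspectRatio(contentMode: .fit)
            .frame(width: info.size.width, height: info.size.height)
            .foregroundColor(tint)
    }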
LiveProject/shape/IconBack.swift
File was deleted
LiveProject/shape/IconCamera.swift
File was deleted
LiveProject/shape/IconLandscape.swift
File was deleted
LiveProject/shape/IconMic.swift
File was deleted
LiveProject/shape/IconPortrait.swift
File was deleted
LiveProject/shape/IconRect.swift
File was deleted
LiveProject/tool/CameraHelper.swift
File was deleted
LiveProject/tool/H264Encoder.swift
New file
@@ -0,0 +1,166 @@
//
//  H264Encoder.swift
//  LiveProject
//
//  Created by 倪路朋 on 6/28/25.
//
import Foundation
import AVFoundation
import VideoToolbox
class H264Encoder {
    private var session: VTCompressionSession?
    private let width: Int
    private let height: Int
    private let fps: Int
    private let bitrate: Int
    private let converter : PixelBufferConverter  = PixelBufferConverter()
    var onEncoded: ((Data, CMTime, Bool) -> Void)?
    init(width: Int, height: Int, fps: Int, bitrate: Int) {
        self.width = width
        self.height = height
        self.fps = fps
        self.bitrate = bitrate
        setupSession()
    }
    private func setupSession() {
        let status = VTCompressionSessionCreate(
            allocator: nil,
            width: Int32(width),
            height: Int32(height),
            codecType: kCMVideoCodecType_H264,
            encoderSpecification: nil,
            imageBufferAttributes: nil,
            compressedDataAllocator: nil,
            outputCallback: encodeCallback,
            refcon: UnsafeMutableRawPointer(Unmanaged.passRetained(self).toOpaque()),
            compressionSessionOut: &session
        )
        guard status == noErr, let session = session else {
            print("❌ Failed to create session: \(status)")
            return
        }
        VTSessionSetProperty(session, key: kVTCompressionPropertyKey_RealTime, value: kCFBooleanTrue)
        VTSessionSetProperty(session, key: kVTCompressionPropertyKey_AverageBitRate, value: bitrate as CFTypeRef)
        let frameInterval = fps
        VTSessionSetProperty(session, key: kVTCompressionPropertyKey_MaxKeyFrameInterval, value: frameInterval as CFTypeRef)
        VTSessionSetProperty(session, key: kVTCompressionPropertyKey_ExpectedFrameRate, value: frameInterval as CFTypeRef)
        VTCompressionSessionPrepareToEncodeFrames(session)
    }
    func encode(pixelBuffer: CVPixelBuffer, pts: CMTime) {
        guard let session = session else {
            print("❌ Session is nil")
            return
        }
        let format = CVPixelBufferGetPixelFormatType(pixelBuffer)
        let supportedFormats: [OSType] = [
            kCVPixelFormatType_420YpCbCr8BiPlanarFullRange,
            kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange
        ]
        switch format{
        case kCVPixelFormatType_32BGRA:
            print("32BGRA")
            break;
        case kCVPixelFormatType_32ARGB:
            print("32ARGB")
            break;
        default:
            print("????")
            break;
        }
        if let buffer = converter.convertBGRAtoNV12(pixelBuffer) {
            print("converter \(decodeOSType(CVPixelBufferGetPixelFormatType(buffer)))")
            let timestamp = CMTimeMake(value: Int64(CACurrentMediaTime() * 1000), timescale: 1000)
            var flags = VTEncodeInfoFlags()
            let status = VTCompressionSessionEncodeFrame(
                session,
                imageBuffer:buffer ,
                presentationTimeStamp: pts,
                duration: .invalid,
                frameProperties: nil,
                sourceFrameRefcon: nil,
                infoFlagsOut: &flags
            )
            if status != noErr {
                print("❌ Encoding failed: \(status)")
            }
        }
    }
    func finish() {
        guard let session = session else { return }
        VTCompressionSessionCompleteFrames(session, untilPresentationTimeStamp: .invalid)
    }
    func invalidate() {
        guard let session = session else { return }
        VTCompressionSessionInvalidate(session)
        self.session = nil
        // Balance the single passRetained from setupSession exactly once.
        Unmanaged.passUnretained(self).release()
    }
    func decodeOSType(_ format: OSType) -> String {
        let characters = [
            UInt8((format >> 24) & 0xFF),
            UInt8((format >> 16) & 0xFF),
            UInt8((format >> 8) & 0xFF),
            UInt8(format & 0xFF)
        ]
        return String(bytes: characters, encoding: .ascii) ?? "????"
    }
}
// MARK: - VideoToolbox Callback
private func encodeCallback(
    outputCallbackRefCon: UnsafeMutableRawPointer?,
    sourceFrameRefCon: UnsafeMutableRawPointer?,
    status: OSStatus,
    infoFlags: VTEncodeInfoFlags,
    sampleBuffer: CMSampleBuffer?
) {
    guard
        status == noErr,
        let sampleBuffer = sampleBuffer,
        CMSampleBufferDataIsReady(sampleBuffer),
        let ref = outputCallbackRefCon
    else { return }
    let encoder = Unmanaged<H264Encoder>.fromOpaque(ref).takeUnretainedValue()
    guard let blockBuffer = CMSampleBufferGetDataBuffer(sampleBuffer) else { return }
    var length = 0
    var dataPointer: UnsafeMutablePointer<Int8>?
    guard CMBlockBufferGetDataPointer(blockBuffer, atOffset: 0, lengthAtOffsetOut: nil, totalLengthOut: &length, dataPointerOut: &dataPointer) == noErr else {
        return
    }
    let data = Data(bytes: dataPointer!, count: length)
    var isKeyframe = true
    if let attachments = CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, createIfNecessary: false),
       let dict = CFArrayGetValueAtIndex(attachments, 0) {
        let d = unsafeBitCast(dict, to: CFDictionary.self) as NSDictionary
        if let notSync = d[kCMSampleAttachmentKey_NotSync] as? Bool {
            isKeyframe = !notSync
        }
    }
    let pts = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
    encoder.onEncoded?(data, pts, isKeyframe)
    // NOTE: setupSession retained the encoder once via passRetained; the matching
    // release now happens once in invalidate(). Releasing here on every encoded
    // frame would over-release the encoder after the first callback.
}
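The callback hands onEncoded the block buffer's raw bytes. H.264 from VideoToolbox is AVCC-framed: each NALU carries a 4-byte big-endian length prefix, and SPS/PPS live in the sample buffer's format description rather than in the stream. A consumer that expects Annex B (start codes) needs a conversion along these lines; a sketch under that assumption, not part of the commit:

    // Sketch: rewrite 4-byte AVCC length prefixes as Annex B start codes.
    func avccToAnnexB(_ avcc: Data) -> Data {
        var out = Data()
        var offset = 0
        let startCode: [UInt8] = [0, 0, 0, 1]
        while offset + 4 <= avcc.count {
            // Big-endian 4-byte NALU length.
            let len = avcc.subdata(in: offset..<offset + 4).reduce(0) { ($0 << 8) | Int($1) }
            offset += 4
            guard offset + len <= avcc.count else { break }
            out.append(contentsOf: startCode)
            out.append(avcc.subdata(in: offset..<offset + len))
            offset += len
        }
        return out
    }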
LiveProject/tool/MetalPixelConverter.swift
New file
@@ -0,0 +1,227 @@
//
//  MetalPixelConverter.swift
//  LiveProject
//
//  Created by 倪路朋 on 7/1/25.
//
import Metal
import MetalKit
import CoreVideo
final class MetalPixelConverter {
    // MARK: - Properties
    private let device: MTLDevice
    private var textureCache: CVMetalTextureCache?
    private var commandQueue: MTLCommandQueue?
    private var computePipeline: MTLComputePipelineState?
    // MARK: - Initialization
    init?(metalDevice: MTLDevice? = MTLCreateSystemDefaultDevice()) {
        guard let device = metalDevice else {
            print("⚠️ Metal 不可用")
            return nil
        }
        self.device = device
        // 1. Create the Metal texture cache
        guard CVMetalTextureCacheCreate(
            kCFAllocatorDefault,
            nil,
            device,
            nil,
            &textureCache
        ) == kCVReturnSuccess else {
            print("❌ 创建 Metal 纹理缓存失败")
            return nil
        }
        // 2. 创建命令队列
        self.commandQueue = device.makeCommandQueue()
        // 3. 加载着色器
        do {
            self.computePipeline = try makeComputePipeline(device: device)
        } catch {
            print("❌ 加载着色器失败: \(error)")
            return nil
        }
    }
    // MARK: - Public Methods
    /// Convert a BGRA CVPixelBuffer to an NV12 CVPixelBuffer
    func convertBGRAtoNV12(
        _ inputBuffer: CVPixelBuffer,
        completion: @escaping (Result<CVPixelBuffer, Error>) -> Void
    ) {
        // 0. Validate the input format
        guard CVPixelBufferGetPixelFormatType(inputBuffer) == kCVPixelFormatType_32BGRA else {
            completion(.failure(ConversionError.invalidInputFormat))
            return
        }
        // 1. Create the output NV12 buffer
        let (width, height) = (CVPixelBufferGetWidth(inputBuffer), CVPixelBufferGetHeight(inputBuffer))
        guard let outputBuffer = createNV12PixelBuffer(width: width, height: height) else {
            completion(.failure(ConversionError.outputBufferCreationFailed))
            return
        }
        // 2. Process asynchronously (avoid blocking the main thread)
        DispatchQueue.global(qos: .userInitiated).async { [weak self] in
            guard let self = self else { return }
            do {
                let result = try self.performConversion(
                    inputBuffer: inputBuffer,
                    outputBuffer: outputBuffer
                )
                DispatchQueue.main.async {
                    completion(.success(result))
                }
            } catch {
                DispatchQueue.main.async {
                    completion(.failure(error))
                }
            }
        }
    }
    // MARK: - Private Methods
    private func makeComputePipeline(device: MTLDevice) throws -> MTLComputePipelineState {
        // 1. Get the default Metal library (requires a .metal file in the project)
        let library = try device.makeDefaultLibrary(bundle: Bundle(for: Self.self))
        // 2. Load the kernel function
        guard let kernelFunction = library.makeFunction(name: "bgraToNV12") else {
            throw ConversionError.shaderNotFound
        }
        // 3. Create the compute pipeline
        return try device.makeComputePipelineState(function: kernelFunction)
    }
    private func createNV12PixelBuffer(width: Int, height: Int) -> CVPixelBuffer? {
        var pixelBuffer: CVPixelBuffer?
        let status = CVPixelBufferCreate(
            kCFAllocatorDefault,
            width,
            height,
            kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange,
            nil,
            &pixelBuffer
        )
        return status == kCVReturnSuccess ? pixelBuffer : nil
    }
    private func performConversion(
        inputBuffer: CVPixelBuffer,
        outputBuffer: CVPixelBuffer
    ) throws -> CVPixelBuffer {
        // 1. Lock the buffers
        CVPixelBufferLockBaseAddress(inputBuffer, .readOnly)
        CVPixelBufferLockBaseAddress(outputBuffer, [])
        defer {
            CVPixelBufferUnlockBaseAddress(inputBuffer, .readOnly)
            CVPixelBufferUnlockBaseAddress(outputBuffer, [])
        }
        // 2. Create the Metal textures
        guard let textureCache = textureCache,
              let inputTexture = createMetalTexture(
                from: inputBuffer,
                pixelFormat: .bgra8Unorm,
                textureCache: textureCache
              ),
              let yTexture = createMetalTexture(
                from: outputBuffer,
                planeIndex: 0,
                pixelFormat: .r8Unorm,
                textureCache: textureCache
              ),
              let uvTexture = createMetalTexture(
                from: outputBuffer,
                planeIndex: 1,
                pixelFormat: .rg8Unorm,
                textureCache: textureCache
              ) else {
            throw ConversionError.textureCreationFailed
        }
        // 3. Run the Metal compute pass
        guard let commandBuffer = commandQueue?.makeCommandBuffer(),
              let encoder = commandBuffer.makeComputeCommandEncoder() else {
            throw ConversionError.metalCommandFailed
        }
        encoder.setComputePipelineState(computePipeline!)
        encoder.setTexture(inputTexture, index: 0)
        encoder.setTexture(yTexture, index: 1)
        encoder.setTexture(uvTexture, index: 2)
        // 4. Dispatch the compute grid
        let threadgroupSize = MTLSize(width: 16, height: 16, depth: 1)
        let threadgroupCount = MTLSize(
            width: (inputTexture.width + threadgroupSize.width - 1) / threadgroupSize.width,
            height: (inputTexture.height + threadgroupSize.height - 1) / threadgroupSize.height,
            depth: 1
        )
        encoder.dispatchThreadgroups(threadgroupCount, threadsPerThreadgroup: threadgroupSize)
        encoder.endEncoding()
        // 5. Commit and wait for completion
        commandBuffer.commit()
        commandBuffer.waitUntilCompleted()
        return outputBuffer
    }
    private func createMetalTexture(
        from pixelBuffer: CVPixelBuffer,
        planeIndex: Int = 0,
        pixelFormat: MTLPixelFormat,
        textureCache: CVMetalTextureCache
    ) -> MTLTexture? {
        let width = CVPixelBufferGetWidthOfPlane(pixelBuffer, planeIndex)
        let height = CVPixelBufferGetHeightOfPlane(pixelBuffer, planeIndex)
        var cvMetalTexture: CVMetalTexture?
        let status = CVMetalTextureCacheCreateTextureFromImage(
            nil,
            textureCache,
            pixelBuffer,
            nil,
            pixelFormat,
            width,
            height,
            planeIndex,
            &cvMetalTexture
        )
        guard status == kCVReturnSuccess, let texture = cvMetalTexture else {
            return nil
        }
        return CVMetalTextureGetTexture(texture)
    }
    // MARK: - Error Handling
    enum ConversionError: Error, LocalizedError {
        case invalidInputFormat
        case outputBufferCreationFailed
        case shaderNotFound
        case textureCreationFailed
        case metalCommandFailed
        var errorDescription: String? {
            switch self {
            case .invalidInputFormat: return "Input format must be BGRA"
            case .outputBufferCreationFailed: return "Failed to create the NV12 output buffer"
            case .shaderNotFound: return "Metal shader not found"
            case .textureCreationFailed: return "Failed to create Metal textures"
            case .metalCommandFailed: return "Metal command execution failed"
            }
        }
    }
}
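makeComputePipeline looks up a kernel named bgraToNV12, so a matching .metal file has to exist in the bundle; the shader itself is not part of this commit. Roughly what that kernel could look like, compiled here from a source string so the sketch stays self-contained (BT.601 video-range constants assumed; the project's on-disk .metal file would carry the same MSL body):

    import Metal

    let bgraToNV12Source = """
    #include <metal_stdlib>
    using namespace metal;

    kernel void bgraToNV12(texture2d<float, access::read>  inTex [[texture(0)]],
                           texture2d<float, access::write> yTex  [[texture(1)]],
                           texture2d<float, access::write> uvTex [[texture(2)]],
                           uint2 gid [[thread_position_in_grid]]) {
        if (gid.x >= inTex.get_width() || gid.y >= inTex.get_height()) return;
        float4 c = inTex.read(gid); // bgra8Unorm reads back as RGBA components
        float y = 0.257 * c.r + 0.504 * c.g + 0.098 * c.b + 16.0 / 255.0;
        yTex.write(float4(y), gid);
        // One interleaved CbCr sample per 2x2 block of luma pixels.
        if ((gid.x % 2) == 0 && (gid.y % 2) == 0) {
            float u = -0.148 * c.r - 0.291 * c.g + 0.439 * c.b + 0.5;
            float v =  0.439 * c.r - 0.368 * c.g - 0.071 * c.b + 0.5;
            uvTex.write(float4(u, v, 0.0, 0.0), gid / 2);
        }
    }
    """

    func makeConverterLibrary(device: MTLDevice) throws -> MTLLibrary {
        try device.makeLibrary(source: bgraToNV12Source, options: nil)
    }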
LiveProject/tool/MetalRenderer.swift
@@ -4,68 +4,99 @@
//  渲染工具
//  Created by 倪路朋 on 6/26/25.
//
import Foundation
import Metal
import MetalKit
import AVFoundation
class MetalRenderer: NSObject, MTKViewDelegate {
-    private var device: MTLDevice!
-    private var commandQueue: MTLCommandQueue!
-    private var textureCache: CVMetalTextureCache!
    private let device: MTLDevice
    private let commandQueue: MTLCommandQueue
    private let ciContext: CIContext
    private var currentPixelBuffer: CVPixelBuffer?
    private let textureCache: CVMetalTextureCache
-    func setup(view: MTKView) {
-        self.device = view.device
-        self.commandQueue = device.makeCommandQueue()
-        CVMetalTextureCacheCreate(nil, nil, device, nil, &textureCache)
    init(mtkView: MTKView) {
        guard let device = MTLCreateSystemDefaultDevice(),
              let commandQueue = device.makeCommandQueue() else {
            fatalError("Unable to create Metal device or command queue")
    }
-    func updateFrame(pixelBuffer: CVPixelBuffer) {
-        self.currentPixelBuffer = pixelBuffer
-    }
        self.device = device
        self.commandQueue = commandQueue
        self.ciContext = CIContext(mtlDevice: device)
-    func mtkView(_ view: MTKView, drawableSizeWillChange size: CGSize) {}
        var tmpCache: CVMetalTextureCache?
        CVMetalTextureCacheCreate(nil, nil, device, nil, &tmpCache)
        guard let textureCache = tmpCache else {
            fatalError("Unable to create texture cache")
        }
        self.textureCache = textureCache
        super.init()
        // ✅ key settings that make the MTKView drive rendering
        mtkView.device = device
        mtkView.framebufferOnly = false
        mtkView.isPaused = false
        mtkView.enableSetNeedsDisplay = false
        mtkView.delegate = self
        print("MetalRenderer init")
    }
    func draw(in view: MTKView) {
-        guard let drawable = view.currentDrawable,
-              let descriptor = view.currentRenderPassDescriptor,
-              let pixelBuffer = currentPixelBuffer else { return }
        let size = view.drawableSize
        //print("🧾 drawableSize = \(size)")
-        var textureRef: CVMetalTexture?
-        let width = CVPixelBufferGetWidth(pixelBuffer)
-        let height = CVPixelBufferGetHeight(pixelBuffer)
-        let status = CVMetalTextureCacheCreateTextureFromImage(
-            nil, textureCache, pixelBuffer, nil,
-            .bgra8Unorm, width, height, 0, &textureRef)
-        guard status == kCVReturnSuccess,
-              let cvTexture = textureRef,
-              let texture = CVMetalTextureGetTexture(cvTexture) else { return }
-        let commandBuffer = commandQueue.makeCommandBuffer()!
-        let encoder = commandBuffer.makeRenderCommandEncoder(descriptor: descriptor)!
-        encoder.setFragmentTexture(texture, index: 0)
-        encoder.endEncoding()
-        // simple copy (no shader processing)
-        let blitEncoder = commandBuffer.makeBlitCommandEncoder()!
-        let dstTexture = drawable.texture
-        if dstTexture.width != texture.width || dstTexture.height != texture.height {
-            print("❌ size mismatch, cannot blit: src = \(texture.width)x\(texture.height), dst = \(dstTexture.width)x\(dstTexture.height)")
        if !size.width.isFinite || !size.height.isFinite || size.width <= 0 || size.height <= 0 {
            print("❌ 非法尺寸,跳过渲染 \(size)")
            return
        }
-        blitEncoder.copy(from: texture,
-                         sourceSlice: 0,
-                         sourceLevel: 0,
-                         sourceOrigin: MTLOrigin(x: 0, y: 0, z: 0),
-                         sourceSize: MTLSize(width: width, height: height, depth: 1),
        guard let drawable = view.currentDrawable,
              let commandBuffer = commandQueue.makeCommandBuffer(),
              let pixelBuffer = currentPixelBuffer else {
            return
        }
        let drawableSize = view.drawableSize
        guard drawableSize.width > 0, drawableSize.height > 0 else { return }
        // orientation fix: rotate 90° clockwise
        var ciImage = CIImage(cvPixelBuffer: pixelBuffer).oriented(.right)
        // aspect-fit scale, then center
        let sourceExtent = ciImage.extent
        let scaleX = drawableSize.width / sourceExtent.width
        let scaleY = drawableSize.height / sourceExtent.height
        let scale = min(scaleX, scaleY)
        let scaledImage = ciImage.transformed(by: CGAffineTransform(scaleX: scale, y: scale))
        let xOffset = (drawableSize.width - scaledImage.extent.width) / 2
        let yOffset = (drawableSize.height - scaledImage.extent.height) / 2
        let translatedImage = scaledImage.transformed(by: CGAffineTransform(translationX: xOffset, y: yOffset))
        // render
        ciContext.render(translatedImage,
                         to: drawable.texture,
-                         destinationSlice: 0,
-                         destinationLevel: 0,
-                         destinationOrigin: MTLOrigin(x: 0, y: 0, z: 0))
-        blitEncoder.endEncoding()
                         commandBuffer: commandBuffer,
                         bounds: CGRect(origin: .zero, size: drawableSize),
                         colorSpace: CGColorSpaceCreateDeviceRGB())
        commandBuffer.present(drawable)
        commandBuffer.commit()
        print("绘制画面")
        //print("绘制画面")
    }
    func mtkView(_ view: MTKView, drawableSizeWillChange size: CGSize) {
        // No-op: Handle size change if needed
    }
    func display(pixelBuffer: CVPixelBuffer) {
        self.currentPixelBuffer = pixelBuffer
        //print("display")
        //refresh
    }
}
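The aspect-fit math in draw(in:) letterboxes rather than stretches. For example, a 1920×1080 camera buffer becomes 1080×1920 after .oriented(.right); fitting that into a 1170×2532 drawable gives scale = min(1170/1080, 2532/1920) ≈ 1.083, a scaled image of roughly 1170×2080, and yOffset = (2532 - 2080) / 2 = 226 pixels of bar above and below (the sizes are illustrative).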
LiveProject/tool/PixelBufferConverter.swift
New file
@@ -0,0 +1,44 @@
//
//  PixelBufferConverter.swift
//  LiveProject
//
//  Created by 倪路朋 on 6/30/25.
//
import Foundation
import CoreImage
import CoreVideo
class PixelBufferConverter {
    private let ciContext = CIContext()
    func convertBGRAtoNV12(_ srcBuffer: CVPixelBuffer) -> CVPixelBuffer? {
        let width = CVPixelBufferGetWidth(srcBuffer)
        let height = CVPixelBufferGetHeight(srcBuffer)
        let attrs: [CFString: Any] = [
            kCVPixelBufferPixelFormatTypeKey: kCVPixelFormatType_420YpCbCr8BiPlanarFullRange,
            kCVPixelBufferWidthKey: width,
            kCVPixelBufferHeightKey: height,
            kCVPixelBufferIOSurfacePropertiesKey: [:]
        ]
        var dstBuffer: CVPixelBuffer?
        let status = CVPixelBufferCreate(nil, width, height, kCVPixelFormatType_420YpCbCr8BiPlanarFullRange, attrs as CFDictionary, &dstBuffer)
        guard status == kCVReturnSuccess, let output = dstBuffer else {
            print("❌ 创建 NV12 失败,状态: \(status)")
            return nil
        }
        // Render
        let ciImage = CIImage(cvPixelBuffer: srcBuffer)
        CVPixelBufferLockBaseAddress(output, [])
        ciContext.render(ciImage, to: output)
        CVPixelBufferUnlockBaseAddress(output, [])
        return output
    }
}
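This Core Image converter is the one H264Encoder actually calls per frame; the Metal compute path above is an alternative implementation. A quick sanity check of its output (a sketch; bgraBuffer stands for any 32BGRA CVPixelBuffer you already have):

    let converter = PixelBufferConverter()
    if let nv12 = converter.convertBGRAtoNV12(bgraBuffer) {
        // CIContext fills both planes; the result should come back biplanar full-range.
        assert(CVPixelBufferGetPixelFormatType(nv12) == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)
        assert(CVPixelBufferGetPlaneCount(nv12) == 2)
    }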
LiveProject/views/FlowLayout.swift
@@ -5,148 +5,66 @@
//  Created by 倪路朋 on 6/26/25.
//
import SwiftUI
/// Fully custom layout container with automatic line wrapping
-struct FlowLayout<Data: RandomAccessCollection, Content: View>: View where Data.Element: Hashable {
-    // MARK: - Properties
struct FlowLayout: Layout {
    var spacing: CGFloat = 8
    var lineSpacing: CGFloat = 8
    
-    /// The data collection to display
-    let data: Data
-    /// Horizontal spacing
-    let horizontalSpacing: CGFloat
-    /// Vertical spacing
-    let verticalSpacing: CGFloat
-    /// Alignment
-    let alignment: HorizontalAlignment
-    /// Content builder closure
-    let content: (Data.Element) -> Content
-    /// Total height state
-    @State private var totalHeight: CGFloat = 0
-    // MARK: - Initialization
-    /// Initialize a FlowLayout
-    /// - Parameters:
-    ///   - data: the data collection to display
-    ///   - horizontalSpacing: horizontal spacing, defaults to 8
-    ///   - verticalSpacing: vertical spacing, defaults to 8
-    ///   - alignment: alignment, defaults to .leading
-    ///   - content: content builder closure
-    init(
-        _ data: Data,
-        horizontalSpacing: CGFloat = 8,
-        verticalSpacing: CGFloat = 8,
-        alignment: HorizontalAlignment = .leading,
-        @ViewBuilder content: @escaping (Data.Element) -> Content
-    ) {
-        self.data = data
-        self.horizontalSpacing = horizontalSpacing
-        self.verticalSpacing = verticalSpacing
-        self.alignment = alignment
-        self.content = content
-    }
-    // MARK: - Main view body
-    var body: some View {
-        GeometryReader { geometry in
-            self.contentView(in: geometry)
-                .background(
-                    HeightReader(height: $totalHeight)
-                )
-        }
-        .frame(height: totalHeight)
-    }
-    // MARK: - Private methods
-    /// Build the content view
-    private func contentView(in geometry: GeometryProxy) -> some View {
    func sizeThatFits(proposal: ProposedViewSize, subviews: Subviews, cache: inout ()) -> CGSize {
        var width: CGFloat = 0
        var height: CGFloat = 0
-        var lastHeight: CGFloat = 0
        var currentLineWidth: CGFloat = 0
        var currentLineHeight: CGFloat = 0
        let maxWidth = proposal.width ?? .infinity
        
-        return ZStack(alignment: Alignment(horizontal: alignment, vertical: .top)) {
-            ForEach(data.map { $0 }, id: \.self) { item in
-                content(item)
-                    .padding(.trailing, horizontalSpacing)
-                    .padding(.bottom, verticalSpacing)
-                    .alignmentGuide(.leading) { dimensions in
-                        // check whether we need to wrap to a new line
-                        if abs(width - dimensions.width) > geometry.size.width {
-                            width = 0
-                            height += lastHeight + verticalSpacing
-                        }
-                        let result = width
-                        // update the running width
-                        if item == data.last {
-                            width = 0 // reset to 0 on the last item
        for view in subviews {
            let size = view.sizeThatFits(.unspecified)
            if currentLineWidth + size.width > maxWidth {
                width = max(width, currentLineWidth)
                height += currentLineHeight + lineSpacing
                currentLineWidth = size.width
                currentLineHeight = size.height
-                        } else {
-                            width -= dimensions.width + horizontalSpacing
-                        }
-                        // record the current line height
-                        lastHeight = dimensions.height
-                        return result
-                    }
-                    .alignmentGuide(.top) { dimensions in
-                        let result = height
-                        // if this is the last item, update the total height
-                        if item == data.last {
-                            height += lastHeight + verticalSpacing
-                        }
-                        return result
-                    }
-            }
-        }
                currentLineWidth += size.width + spacing
                currentLineHeight = max(currentLineHeight, size.height)
    }
}
-// MARK: - Height reader
        width = max(width, currentLineWidth)
        height += currentLineHeight
-/// Helper view that reads a view's height
-private struct HeightReader: View {
-    @Binding var height: CGFloat
        return CGSize(width: width, height: height)
    }
    
-    var body: some View {
-        GeometryReader { geometry in
-            Color.clear
-                .preference(
-                    key: HeightPreferenceKey.self,
-                    value: geometry.size.height
    func placeSubviews(in bounds: CGRect, proposal: ProposedViewSize, subviews: Subviews, cache: inout ()) {
        var x: CGFloat = 0
        var y: CGFloat = 0
        var lineHeight: CGFloat = 0
        for view in subviews {
            let size = view.sizeThatFits(.unspecified)
            if x + size.width > bounds.width {
                x = 0
                y += lineHeight + lineSpacing
                lineHeight = 0
            }
            view.place(
                at: CGPoint(x: bounds.minX + x, y: bounds.minY + y),
                proposal: ProposedViewSize(width: size.width, height: size.height)
            )
        }
-        .onPreferenceChange(HeightPreferenceKey.self) { newHeight in
-            DispatchQueue.main.async {
-                self.height = newHeight
-            }
-        }
    }
}
-// MARK: - Height preference key
-/// PreferenceKey used to propagate the height value
-private struct HeightPreferenceKey: PreferenceKey {
-    static var defaultValue: CGFloat = 0
-    static func reduce(value: inout CGFloat, nextValue: () -> CGFloat) {
-        value = nextValue()
            x += size.width + spacing
            lineHeight = max(lineHeight, size.height)
    }
}
}
// MARK: - Usage example
struct FlowLayoutExample: View {
    let tags = [
        "Swift", "SwiftUI", "UIKit", "Combine", "Core Data",
        "Xcode", "Interface Builder", "Core Animation", "ARKit",
        "Metal", "Core ML", "Vision", "MapKit", "CloudKit"
        "Swift"
    ]
    
    @State private var newTag = ""
@@ -154,14 +72,22 @@
    
    var body: some View {
        VStack {
-            FlowLayout(customTags + tags, horizontalSpacing: 10, verticalSpacing: 10) { tag in
-                MButton(text:tag){
            VStack(spacing: 20) {
                FlowLayout(){
                    
                        ForEach(tags, id: \.self) { item in
                            Text(item)
                                .padding(.horizontal, 12)
                                .padding(.vertical, 6)
                                .background(Color.blue.opacity(0.2))
                                .cornerRadius(8)
                }
                }.frame(alignment:.leading)
                .background(Color.red)
            }
            .padding()
            .animation(.default, value: customTags)
            .frame(maxWidth: .infinity,alignment:.leading)
        }
        .background(Color.black)
    }
    
    private func addTag() {
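FlowLayout now adopts the Layout protocol (iOS 16+) instead of measuring itself with GeometryReader and alignment guides, so callers hand it children directly, as FlowLayoutExample above does. The shape of a call site, spelled out (the tags are illustrative):

    FlowLayout(spacing: 10, lineSpacing: 10) {
        ForEach(["Swift", "SwiftUI", "Metal"], id: \.self) { tag in
            Text(tag)
                .padding(.horizontal, 12)
                .padding(.vertical, 6)
                .background(Color.blue.opacity(0.2))
                .cornerRadius(8)
        }
    }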
LiveProject/views/LTextField.swift
New file
@@ -0,0 +1,73 @@
//
//  LTextField.swift
//  LiveProject
//
//  Created by 倪路朋 on 7/1/25.
//
import SwiftUI
class LText:ObservableObject{
    @Published var input = ""
    func update( _ text : String){
        input = text;
    }
}
struct LTextField: View {
    var hint:String = "请输入RTMP直播地址";
    @EnvironmentObject var text:LText;
    @State var onFocus = false;
    @FocusState var isfocused:Bool
    @State var inputType : UIKeyboardType = UIKeyboardType.default;
    @State var strl = "";
    var lines = 1;
    var body: some View {
        VStack(alignment: .leading) {// iOS
            let binding = Binding<String>(get: {
                //print(self.text.input);
                return text.input
            }, set: { str in
                text.update(str)
            })
            TextField(hint,text: binding){ change in
                    print(hint+" \(change)")
                    self.onFocus = change
                } onCommit: {
                    print("onCommit")
                }
                .font(Font.system(size: 16))
                .focused($isfocused)
                .foregroundColor(Color.colorText)
                .padding(.leading,24)
                .padding(.trailing,24)
                .cornerRadius(12)
                .frame(height: 60)
                .keyboardType(inputType)
                .onSubmit {
                    print("onSubmit")
                }
                .overlay(
                    RoundedRectangle(cornerRadius: 12, style: .continuous)
                        .stroke(Color.colorText, lineWidth: 2)
                )
        }.frame(minWidth: 0, maxWidth: .infinity, alignment: .topLeading)
            .onTapGesture {
                isfocused = true
                print("Click ATextField button")
                //textField.focused(SwiftUI.FocusState<Bool>)
            }
    }
}
struct LTextField_Previews: PreviewProvider {
    static var previews: some View {
        LTextField().environmentObject(LText())
    }
}
LiveProject/views/MButton.swift
@@ -12,7 +12,7 @@
    
    var valid :ValidState = .VALID;
    
-    var icon : (any View)? = nil;
    var icon : IconInfo? = nil;
    
    var text : String? = nil;
    
@@ -30,19 +30,23 @@
            
            ZStack() {// iOS
                HStack() {// iOS
-                    if let iconView = icon {
-                        AnyView(iconView)
                    if let info = icon{
                        Image(systemName: info.name)
                            .resizable()
                            .frame(width: info.size.width, height: info.size.height)
                            .aspectRatio(contentMode: .fit)
                            .foregroundColor(Color.white)
                    }
                    if let str = text {
                        Text(str)
                            .font(Font.system(size: 16))
                            .foregroundColor(Color.init("ColorWhite"))
                            .foregroundColor(Color.white)
                            .frame(width: .infinity, height: 40)
                    }
                }.frame(minWidth: 40, maxHeight: 40).padding(EdgeInsets(top: 0, leading: 15, bottom: 0, trailing: 15))
            }.frame(maxHeight: 40).background(
                RoundedRectangle(cornerRadius: 20, style: .continuous)
-                    .fill(Color.init(valid == .INVALID ?"ColorGray":"ColorText"))
                    .fill(valid == .INVALID ? Color.colorGray : Color.colorText)
            )
            
        }.buttonStyle( TextBtnStyle())
@@ -51,6 +55,6 @@
struct MButton_Previews: PreviewProvider {
    static var previews: some View {
-        MButton(icon: IconPortrait())
        MButton(icon: Icons.IMAGE_MUTE)
    }
}
LiveProject/views/TitleBarView.swift
@@ -12,7 +12,7 @@
    @Environment(\.presentationMode) var presentationMode
    
    var title = ""
-    var iconBack  = IconBack();
    var iconBack  = Icons.BACK;
    var imgRight = "";
    var titleColor = Color.colorText
    
@@ -24,7 +24,11 @@
                    print("Click back button")
                    self.presentationMode.wrappedValue.dismiss()
                }) {
-                    iconBack.stroke(Color.primary, lineWidth: 2.5).frame(width: 18,height: 14)
                    Image(systemName: iconBack.name)
                        .resizable()
                        .frame(width: iconBack.size.width, height: iconBack.size.height)
                        .aspectRatio(contentMode: .fit)
                        .foregroundColor(Color.white)
                }
                Spacer()
                Text(title).foregroundColor(titleColor)
LiveProject/views/VideoRendererView.swift
@@ -8,19 +8,33 @@
import MetalKit
struct VideoRendererView: UIViewRepresentable {
-    let renderer: MetalRenderer  // custom Metal renderer that accepts RGBA/YUV frames
    @Binding var pixelBuffer: CVPixelBuffer?
    // cache the instances in a Coordinator
    func makeCoordinator() -> Coordinator {
        return Coordinator()
    }
    func makeUIView(context: Context) -> MTKView {
-        let view = MTKView()
-        view.device = MTLCreateSystemDefaultDevice()
-        view.colorPixelFormat = .bgra8Unorm
-        view.clearColor = MTLClearColor(red: 0.2, green: 0.5, blue: 0.7, alpha: 1.0)
-        view.delegate = renderer
-        view.isPaused = false
-        view.enableSetNeedsDisplay = false
-        renderer.setup(view: view)
-        return view
        return context.coordinator.mtkView
    }
-    func updateUIView(_ uiView: MTKView, context: Context) {}
    func updateUIView(_ uiView: MTKView, context: Context) {
        if let buffer = pixelBuffer {
            //print("updateUIView")
            context.coordinator.renderer.display(pixelBuffer: buffer)
        }
    }
    class Coordinator {
        let mtkView: MTKView
        let renderer: MetalRenderer
        init() {
            print("📦 MetalRendererWrapper 初始化了")
            mtkView = MTKView()
            renderer = MetalRenderer(mtkView: mtkView)
        }
    }
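makeUIView used to build a fresh MTKView and re-run renderer setup on every SwiftUI rebuild; routing construction through makeCoordinator pins one MTKView and one MetalRenderer to the representable's lifetime, and updateUIView only pushes the latest frame. The same shape in miniature (a sketch with a UILabel standing in for the MTKView):

    import SwiftUI
    import UIKit

    struct CounterView: UIViewRepresentable {
        @Binding var value: Int
        func makeCoordinator() -> Coordinator { Coordinator() }
        func makeUIView(context: Context) -> UILabel {
            // The Coordinator, not makeUIView, owns the long-lived UIKit object.
            context.coordinator.label
        }
        func updateUIView(_ uiView: UILabel, context: Context) {
            uiView.text = "\(value)"  // updates only push state; nothing is rebuilt
        }
        final class Coordinator {
            let label = UILabel()
        }
    }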
}