我用 Electron 实现了一个录屏软件,使用 desktopCapturer 可以录制全屏,但是想做局部或区域录屏就不太行。Electron 里好像不能使用 CropTarget 这个类,目前只能比较曲折地用 ffmpeg 实现。大佬们,有什么优雅的方法吗?
需求: 声音动态生成, 视频固定来源. 代码中使用的是testsrc. 代码一直卡在rawvedio写入命名管道哪也没撒错误 示例代码: import subprocess import os from threading import Thread import numpy as np from transformers import VitsModel, VitsTokenizer, PreTrainedTokenizerBase import torch import ffmpeg def read_frame_from_stdout(vedioProcess, width, height): frame_size = width * height * 3 input_bytes = vedioProcess.stdout.read(frame_size) if not input_bytes: return assert len(input_bytes) == frame_size return np.frombuffer(input_bytes, np.uint8).reshape([height, width, 3]) def writer(vedioProcess, pipe_name, chunk_size): width = 640 height = 480 while True: input_frame = read_frame_from_stdout(vedioProcess, width, height) print('read frame is:' % input_frame) if input_frame is None: print('read frame is: None') break frame = input_frame * 0.3 os.write(fd_pipe, (frame.astype(np.uint8).tobytes())) # Closing the pipes as closing files. os.close(fd_pipe) # 加载TTS模型 def loadModel(device: str): model = VitsModel.from_pretrained("./mms-tts-eng", local_files_only=True).to(device) # acebook/mms-tts-deu tokenizer = VitsTokenizer.from_pretrained("./mms-tts-eng", local_files_only=True) return model, tokenizer # 将32位浮点转成16位整数, 适用于:16000(音频采样率) def covertFl32ToInt16(nyArr): return np.int16(nyArr / np.max(np.abs(nyArr)) * 32767) def audioWriteInPipe(nyArr, audioPipeName): # Write to named pipe as writing to a file (but write the data in small chunks). 
os.write(audioPipeName, covertFl32ToInt16(nyArr.squeeze()).tobytes()) # Write 1024 bytes of data to fd_pipe # 生成numpy def generte(prompt:str, device: str, model: VitsModel, tokenizer: PreTrainedTokenizerBase): inputs = tokenizer(prompt, return_tensors="pt").to(device) # with torch.no_grad(): # output = model(**inputs).waveform return output.cpu().numpy() def soundPipeWriter(model, device, tokenizer, pipeName): fd_pipe = os.open(pipeName, os.O_WRONLY) filepath = 'night.txt' for content in read_file(filepath): print(content) audioWriteInPipe(generte(prompt=content, device=device, model=model, tokenizer=tokenizer), audioPipeName=fd_pipe) os.close(fd_pipe) # 读取文件源 def read_file(filepath:str): with open(filepath) as fp: for content in fp: yield content def record(vedioProcess, model, tokenizer, device): # Names of the "Named pipes" pipeA = "audio_pipe1" pipeV = "vedio_pipe2" # Create "named pipes". os.mkfifo(pipeA) os.mkfifo(pipeV) # Open FFmpeg as sub-process # Use two audio input streams: # 1. Named pipe: "audio_pipe1" # 2. Named pipe: "audio_pipe2" process = ( ffmpeg .concat(ffmpeg.input("pipe:vedio_pipe2"), ffmpeg.input("pipe:audio_pipe1"), v=1, a=1) .output("merge_audio_vedio.mp4", pix_fmt='yuv480p', vcodec='copy', acodec='aac') .run_async(pipe_stderr=True) ) # Initialize two "writer" threads (each writer writes data to named pipe in chunks of 1024 bytes). thread1 = Thread(target=soundPipeWriter, args=(model, device, tokenizer, pipeA)) # thread1 writes samp1 to pipe1 thread2 = Thread(target=writer, args=(vedioProcess, pipeV, 1024)) # thread2 writes samp2 to pipe2 # Start the two threads thread1.start() thread2.start() # Wait for the two writer threads to finish thread1.join() thread2.join() process.wait() # Wait for FFmpeg sub-process to finish # Remove the "named pipes". 
os.unlink(pipeV) os.unlink(pipeA) if __name__ == "__main__": device: str = "cuda:0" if torch.cuda.is_available() else "cpu" model, tokenizer = loadModel(device=device) # make lavfi-testSrc 60s mp4 vedioProcess = ( ffmpeg .input('testsrc=duration=10:size=640x480:rate=30', f="lavfi", t=60) .output('pipe:', format='rawvideo', pix_fmt='rgb24') .run_async(pipe_stdout=True) ) # record(vedioProcess, model, tokenizer, device) vedioProcess.wait() vscode中截图: https://wmprod.oss-cn-shanghai.aliyuncs.com/c/user/20240925/f010760dfc6c74520f9b3197230485b2.png
如何将不断生成的音频动态写入 ffmpeg 的流中?动态音频是由一个外部程序持续生成的,想通过 ffmpeg 把这些不断生成的音频推送到指定的 RTMP 地址上。项目代码使用 Python。已安装的库:ffmpeg-python;Python 版本:3.10。
有个透明视频 text.mov 和一个背景图 bg.jpg(将作为视频背景),合并之后视频的质量明显降低,设置了帧率、码率、编码方式都不行。有没有解决过类似问题的?悬赏千元求解,谢谢诸位!
我需要依次调用三次subprocess. 发现最后一个没有执行? 在流媒体服务器的控制台上只能看到: aac和mp4的rtmp信息, 看不到live的rtmp连接 参考代码: if __name__ == "__main__": device: str = "cuda:0" if torch.cuda.is_available() else "cpu" model, tokenizer = loadModel(device=device) # # 音频(aac) # audioProcess = sp.Popen(["ffmpeg", "-f", "s16le", '-y', '-vn', "-ac", "1", "-ar", "16000", "-channel_layout", "mono", '-acodec','pcm_s16le', "-i", "pipe:", "-ar", "44100", "-f", "flv", rtmpAURL], stdin=sp.PIPE) # # 视频(mp4) # vedioProcess = sp.Popen(["ffmpeg", "-re", '-y', '-an', '-rtbufsize', '1024M', "-f", "dshow", "-i", "video=Q8 HD Webcam", "-pix_fmt", "yuvj420p", "-framerate", "25", '-vcodec', 'libx264', '-preset', 'fast', '-crf', '25', "-vf", "scale=640:480", "-f", "flv", rtmpVURL], shell=True) # #https://stackoverflow.com/questions/18618191/ffmpeg-merge-multiple-rtmp-stream-inputs-to-a-single-rtmp-output #ffmpeg -i rtmp://ip:1935/live/micMyStream7 -i rtmp://ip:1935/live/MyStream7 -filter_complex "[0:a][1:a]amix[a]" -map 0:v -map "[a]" -c:v copy -f flv rtmp://ip:1935/live/bcove7 # # 合并(live) # mergeVAP = sp.Popen(["ffmpeg", "-i", rtmpAURL, "-i", rtmpVURL, "-c:v", "copy", "-c:a", "aac", "-preset", "veryfast", "-f", "flv", "-flvflags", "no_duration_filesize", rtmpURL], shell=True, stderr=True) writeAudioThead = Thread(target=soundRecorder, args=(model, device, tokenizer, audioProcess)) writeAudioThead.start() writeAudioThead.join() audioProcess.wait() vedioProcess.wait() mergeVAP.wait() 问题1:这三个是串行执行的. 如何让他们异步执行 问题2: 上面的示例串行执行, 为什么 mergeVAP没有执行.在命令行上执行是可以执行的
如题,调用方式如下: avformat_open_input(&av_format_ctx, info->curr_info.url, NULL, NULL); 其中第一个参数 av_format_ctx 在调用前已初始化为 NULL。 错误截图: "image.png" (https://wmprod.oss-cn-shanghai.aliyuncs.com/images/20241210/896c8e2dccfe44f2a89e58db27649d17.png)
用ffmpeg怎么把25个小视频组合成一个大视频? 每个小视频90X160px,没有声音,只有视频 5行5列 一共25个 以下代码来自网络,但是行不通,小视频没有声音,只有画面 @echo off set dirname=D:\短视频\shortvideos_small\ ffmpeg -i %dirname%1.mp4 -i %dirname%2.mp4 -i %dirname%3.mp4 -i %dirname%4.mp4 -i %dirname%5.mp4 -i %dirname%6.mp4 -i %dirname%7.mp4 -i %dirname%8.mp4 -i %dirname%9.mp4 -i %dirname%10.mp4 -i %dirname%11.mp4 -i %dirname%12.mp4 -i %dirname%13.mp4 -i %dirname%14.mp4 -i %dirname%15.mp4 -i %dirname%16.mp4 -i %dirname%17.mp4 -i %dirname%18.mp4 -i %dirname%19.mp4 -i %dirname%20.mp4 -i %dirname%21.mp4 -i %dirname%22.mp4 -i %dirname%23.mp4 -i %dirname%24.mp4 -i %dirname%25.mp4 -filter_complex "[0:v]scale=iw/5:-1[v0];[1:v]scale=iw/5:-1[v1];[2:v]scale=iw/5:-1[v2];[3:v]scale=iw/5:-1[v3];[4:v]scale=iw/5:-1[v4];[5:v]scale=iw/5:-1[v5];[6:v]scale=iw/5:-1[v6];[7:v]scale=iw/5:-1[v7];[8:v]scale=iw/5:-1[v8];[9:v]scale=iw/5:-1[v9];[10:v]scale=iw/5:-1[v10];[11:v]scale=iw/5:-1[v11];[12:v]scale=iw/5:-1[v12];[13:v]scale=iw/5:-1[v13];[14:v]scale=iw/5:-1[v14];[15:v]scale=iw/5:-1[v15];[16:v]scale=iw/5:-1[v16];[17:v]scale=iw/5:-1[v17];[18:v]scale=iw/5:-1[v18];[19:v]scale=iw/5:-1[v19];[20:v]scale=iw/5:-1[v20];[21:v]scale=iw/5:-1[v21];[22:v]scale=iw/5:-1[v22];[23:v]scale=iw/5:-1[v23];[24:v]scale=iw/5:-1[v24];[v0][v1][v2][v3][v4][v5][v6][v7][v8][v9][v10][v11][v12][v13][v14][v15][v16][v17][v18][v19][v20][v21][v22][v23][v24]xstack=inputs=25:layout=0_0|0_h0|0_h0+h1|0_h0+h1+h2|0_h0+h1+h2+h3|w0_0|w0_h0|w0_h0+h1|w0_h0+h1+h2|w0_h0+h1+h2+h3|w0+w4_0|w0+w4_h0|w0+w4_h0+h1|w0+w4_h0+h1+h2|w0+w4_h0+h1+h2+h3|w0+w4+w8_0|w0+w4+w8_h0|w0+w4+w8_h0+h1|w0+w4+w8_h0+h1+h2|w0+w4+w8_h0+h1+h2+h3|w0+w4+w8+w12_0|w0+w4+w8+w12_h0|w0+w4+w8+w12_h0+h1|w0+w4+w8+w12_h0+h1+h2|w0+w4+w8+w12_h0+h1+h2+h3;[0:a][1:a][2:a][3:a][4:a][5:a][6:a][7:a][8:a][9:a][10:a][11:a][12:a][13:a][14:a][15:a][16:a][17:a][18:a][19:a][20:a][21:a][22:a][23:a][24:a]amix=inputs=25" D:\短视频\output.mp4