Add extended display framework

huanglinhuan
2025-12-22 13:48:06 +08:00
parent 1bf30d3c4c
commit 065251f727
13 changed files with 834 additions and 28 deletions


@@ -4,11 +4,7 @@
VideoEncoder::VideoEncoder() = default;
VideoEncoder::~VideoEncoder() {
if (swsContext_) sws_freeContext(swsContext_);
if (codecContext_) avcodec_free_context(&codecContext_);
if (frame_) av_frame_free(&frame_);
if (packet_) av_packet_free(&packet_);
if (stagingTexture_) stagingTexture_.Reset();
Release();
}
bool VideoEncoder::Initialize(ID3D11Device* device, int width, int height, int fps, int bitrate) {
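Centralizing teardown in Release() makes the destructor a one-liner and the cleanup path reusable below. A minimal sketch of the lifecycle this enables (the device pointer and the 1080p60 parameters are illustrative assumptions, not part of this commit):

```cpp
{
    VideoEncoder encoder;
    // d3dDevice is a hypothetical, already-created ID3D11Device*.
    if (!encoder.Initialize(d3dDevice, 1920, 1080, 60, 8'000'000))
        return;
    // ... feed captured frames to EncodeFrame() ...
}   // Destructor calls Release(); since every freed pointer is nulled there,
    // an explicit Release() before destruction would also be safe.
```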
@@ -34,7 +30,8 @@ bool VideoEncoder::Initialize(ID3D11Device* device, int width, int height, int f
return false;
}
// 2. Initialize FFmpeg
// 2. Initialize Encoder
#ifndef NO_FFMPEG
const AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_H264);
if (!codec) {
std::cerr << "Codec H.264 not found" << std::endl;
@@ -84,10 +81,27 @@ bool VideoEncoder::Initialize(ID3D11Device* device, int width, int height, int f
std::cerr << "Could not allocate packet" << std::endl;
return false;
}
#else
// Stub path without FFmpeg
#endif
return true;
}
void VideoEncoder::Release() {
if (swsContext_) { sws_freeContext(swsContext_); swsContext_ = nullptr; }
if (codecContext_) { avcodec_free_context(&codecContext_); codecContext_ = nullptr; }
if (frame_) { av_frame_free(&frame_); frame_ = nullptr; }
if (packet_) { av_packet_free(&packet_); packet_ = nullptr; }
if (stagingTexture_) { stagingTexture_.Reset(); }
pts_ = 0;
}
bool VideoEncoder::Reinitialize(int width, int height, int fps, int bitrate) {
Release();
return Initialize(device_, width, height, fps, bitrate);
}
bool VideoEncoder::EncodeFrame(ID3D11Texture2D* texture, std::vector<uint8_t>& outputData, bool& isKeyFrame) {
if (!texture || !stagingTexture_ || !context_) return false;
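Release() plus Reinitialize() is what lets the extended-display path survive a mode change: tear everything down, then rebuild with the new geometry. A hedged usage sketch (the handler name and the 60 fps / 8 Mbps values are assumptions):

```cpp
// Hypothetical resize handler for when the extended display changes mode.
void OnDisplayModeChanged(VideoEncoder& encoder, int newWidth, int newHeight) {
    // Reinitialize() runs Release() first, so the old sws context, codec
    // context, frame, and packet are freed before the encoder is rebuilt.
    if (!encoder.Reinitialize(newWidth, newHeight, 60, 8'000'000))
        std::cerr << "Encoder reinitialization failed" << std::endl;
}
```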
@@ -99,6 +113,7 @@ bool VideoEncoder::EncodeFrame(ID3D11Texture2D* texture, std::vector<uint8_t>& o
HRESULT hr = context_->Map(stagingTexture_.Get(), 0, D3D11_MAP_READ, 0, &mapped);
if (FAILED(hr)) return false;
#ifndef NO_FFMPEG
// 3. Convert BGRA to YUV420P
if (!swsContext_) {
swsContext_ = sws_getContext(
@@ -111,13 +126,12 @@ bool VideoEncoder::EncodeFrame(ID3D11Texture2D* texture, std::vector<uint8_t>& o
uint8_t* srcSlice[] = { (uint8_t*)mapped.pData };
int srcStride[] = { (int)mapped.RowPitch };
// mapped.RowPitch may include padding beyond width*4;
// sws_scale honors the source stride, so RowPitch is passed through unchanged.
sws_scale(swsContext_, srcSlice, srcStride, 0, height_, frame_->data, frame_->linesize);
#endif
context_->Unmap(stagingTexture_.Get(), 0);
#ifndef NO_FFMPEG
// 4. Encode
frame_->pts = pts_++;
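The conversion leans on sws_scale honoring the source stride, which is why mapped.RowPitch is passed as-is even when it exceeds width*4. A sketch of the context setup this path assumes (the hunk cuts off the sws_getContext arguments; the scaling flag is an assumption):

```cpp
swsContext_ = sws_getContext(
    width_, height_, AV_PIX_FMT_BGRA,     // source: D3D11 staging texture layout
    width_, height_, AV_PIX_FMT_YUV420P,  // destination: what the H.264 encoder consumes
    SWS_BILINEAR, nullptr, nullptr, nullptr);
```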
@@ -141,6 +155,10 @@ bool VideoEncoder::EncodeFrame(ID3D11Texture2D* texture, std::vector<uint8_t>& o
av_packet_unref(packet_);
}
#else
// Stub: no encoding, but pipeline succeeds
isKeyFrame = false;
#endif
return true;
}
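End to end, the encode loop for the extended screen would look roughly like this; the capture and send helpers are hypothetical placeholders, not functions from this commit:

```cpp
std::vector<uint8_t> encoded;
bool keyFrame = false;
// captureNextFrame() and sendToClient() are hypothetical stand-ins for the
// capture and transport layers of the extended-screen framework.
ID3D11Texture2D* frameTex = captureNextFrame();
if (encoder.EncodeFrame(frameTex, encoded, keyFrame) && !encoded.empty())
    sendToClient(encoded, keyFrame);
// Note: in the NO_FFMPEG stub build, EncodeFrame() returns true but leaves
// encoded empty, so the emptiness check skips the send.
```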