
An HM-Based HEVC Player on Windows, Built with MFC

Fla posted @ 2013-04-09 16:23 in HEVC with tags MFC, HEVC, HM, H.265 decoder · 2306 reads

When building a player with MFC, there are two main problems to solve:

1. How to make the HM decoder stop as soon as it has decoded one frame.

2. How to display the decoded pictures (YUV 4:2:0 format).

For the first point, I recommend stepping through the HM decoder yourself; at the very least you should debug at the level of the TAppDecoder project, without necessarily diving into the deeper decoding routines. Once that flow is clear, you will know where a decoded frame becomes available: in the HM code this is in TAppDecoder/TAppDecTop.cpp, in the TAppDecTop::xWriteOutput / xFlushOutput functions. So all we have to do is rework that flow slightly and wrap it in a new interface.
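The header for the wrapper class is not included in this post, so here is a minimal sketch of what NcMHevcDecoder.h could look like, reconstructed from the implementation below. The member names match the .cpp file; deriving directly from TAppDecTop and the exact HM include paths are my assumptions, not taken from the original project.

// NcMHevcDecoder.h -- hypothetical reconstruction of the wrapper header, not the original file
#pragma once
#include <fstream>
#include "TAppDecTop.h"                 // HM decoder application class (TAppDecoder project)
#include "TLibDecoder/AnnexBread.h"     // InputByteStream (assumed include path)
// LPCTSTR / BITMAPINFO / LPVOID come from the Windows / MFC headers pulled in via stdafx.h

class CNcMHevcDecoder : public TAppDecTop
{
public:
	CNcMHevcDecoder();
	~CNcMHevcDecoder();

	// Opens the bitstream, decodes until the first picture is available and
	// fills in a BITMAPINFO that describes the 32-bit output frames.
	bool OpenFile( LPCTSTR lpFilePath, BITMAPINFO &info );

	// Returns a pointer to a top-down BGRA frame buffer, or NULL at end of stream.
	LPVOID GetNextFrame( void );

private:
	// Decodes NAL units until one new picture is ready; returns its temporal id, -1 at EOF.
	int DecodeFrames( void );

	std::ifstream        bitstreamFile;  // raw HEVC Annex-B bitstream
	InputByteStream      bytestream;     // Annex-B byte-stream reader wrapping bitstreamFile
	TComList<TComPic*>  *pcListPic;      // decoded picture buffer
	BITMAPINFO           bmpInfo;        // describes the BGRA output frames
	char                *lpDiBits;       // BGRA pixel buffer, width * height * 4 bytes
	Int                  poc;
	Int                  width, height;
	Int                  last_tId;       // cached result of DecodeFrames(), -2 = nothing cached
	bool                 recon_opened;   // reconstruction YUV file opened (TEST_RECON mode)
	bool                 bFirstFrame;
};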

 

For the second point, displaying a YUV 4:2:0 picture: my approach is to convert it to a bitmap first, which involves a YUV-to-RGB color space conversion.

A more detailed write-up is available on my CSDN blog: http://blog.csdn.net/luofl1992/article/details/8654317
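As a quick reference, the per-pixel math used further down is the standard BT.601 full-range conversion. This is only a sketch; the helper names Yuv2RgbPixel and clamp255 are mine for illustration, but the coefficients are the same ones used in the code below.

// Minimal sketch: convert one YUV pixel (all components in [0, 255]) to RGB
// using BT.601 full-range coefficients.
static inline unsigned char clamp255( int x ) { return x > 255 ? 255 : ( x < 0 ? 0 : (unsigned char)x ); }

static void Yuv2RgbPixel( int y, int cb, int cr, unsigned char &r, unsigned char &g, unsigned char &b )
{
	b = clamp255( (int)( y + 1.772   * ( cb - 128 ) ) );
	g = clamp255( (int)( y - 0.34413 * ( cb - 128 ) - 0.71414 * ( cr - 128 ) ) );
	r = clamp255( (int)( y + 1.402   * ( cr - 128 ) ) );
}
// For 4:2:0 data each (Cb, Cr) pair is shared by a 2x2 block of luma samples,
// which is exactly what the conversion functions in the listing below exploit.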

 

The code for the key interfaces follows:

#include "stdafx.h"
#include "NcMHevcDecoder.h"

Bool g_md5_mismatch; ///< top level flag to signal when there is a decode problem
#define TEST_RECON	0
CNcMHevcDecoder::CNcMHevcDecoder()
	: bytestream(bitstreamFile)
	, poc(0)
	, recon_opened(false)
	, width(416)
	, height(240)
	, bFirstFrame(false)
	, last_tId(-2)
{
	pcListPic = NULL;
	lpDiBits = NULL;
}

CNcMHevcDecoder::~CNcMHevcDecoder()
{
	//	xFlushOutput( pcListPic );
	// delete buffers
	m_cTDecTop.deletePicBuffer();

	// destroy internal classes
	xDestroyDecLib();
	TAppDecTop::destroy();
}

bool CNcMHevcDecoder::OpenFile( LPCTSTR lpFilePath, BITMAPINFO &info )
{
	TAppDecTop::create();
	if( !TAppDecTop::parseCfg( lpFilePath ) )
	{
		TAppDecTop::destroy();
		return false;
	}
#if TEST_RECON
	m_pchReconFile = "recPlayer.yuv";
#endif
	bitstreamFile.open( m_pchBitstreamFile, ifstream::in | ifstream::binary );
	if( !bitstreamFile )
		return false;
	
	// create & initialize internal classes
	xCreateDecLib();
	xInitDecLib();
#if 0
	m_iPOCLastDisplay = m_iSkipFrame;      // set the last displayed POC correctly for skip forward.
#else
	m_iPOCLastDisplay = -1;
#endif
	UInt tId = DecodeFrames();
	if ( pcListPic != NULL && pcListPic->size() )
	{
		TComPicYuv * pcPicYuv = (*(pcListPic->begin()))->getPicYuvRec();
		width = pcPicYuv->getWidth();
		height = pcPicYuv->getHeight();
		bFirstFrame = true;
	}

	BITMAPINFOHEADER &header( bmpInfo.bmiHeader );
	header.biBitCount = 32;
	header.biClrImportant = 0;
	header.biClrUsed = 0;
	header.biCompression = 0;
	//	header.biHeight = 0 - is->video_st->codec->height;	// must be negative here, otherwise the image is rendered upside down
	//	header.biWidth =  is->video_st->codec->width;
	header.biPlanes = 1;
	header.biSize = 40;
	header.biSizeImage = 0;
	header.biXPelsPerMeter = 0;
	header.biYPelsPerMeter = 0;
	header.biHeight = -height;
	header.biWidth = width;
	memset ( bmpInfo.bmiColors, 0, sizeof(bmpInfo.bmiColors));
	memcpy( &info, &bmpInfo, sizeof(bmpInfo) );
	return true;
}

int CNcMHevcDecoder::DecodeFrames( void )
{	
	// main decode loop
	if ( last_tId != -2 )	// a result (including -1 for EOF) is still cached from a previous call
		return last_tId;
	while ( !!bitstreamFile )
	{
		/* location serves to work around a design fault in the decoder, whereby
		 * the process of reading a new slice that is the first slice of a new frame
		 * requires the TDecTop::decode() method to be called again with the same
		 * nal unit. */
		streampos location = bitstreamFile.tellg();
		AnnexBStats stats = AnnexBStats();
		Bool bPreviousPictureDecoded = false;

		vector<uint8_t> nalUnit;
		InputNALUnit nalu;
		byteStreamNALUnit( bytestream, nalUnit, stats );

		// call actual decoding function
		Bool bNewPicture = false;
		if( nalUnit.empty() )
		{
			/* this can happen if the following occur:
			 *  - empty input file
			 *  - two back-to-back start_code_prefixes
			 *  - start_code_prefix immediately followed by EOF
			 */
			fprintf( stderr, "Warning: Attempt to decode an empty NAL unit\n" );
		}
		else
		{
			read( nalu, nalUnit );
			if( ( m_iMaxTemporalLayer >= 0 && nalu.m_temporalId > m_iMaxTemporalLayer ) || !isNaluWithinTargetDecLayerIdSet( &nalu ) )
			{
				if( bPreviousPictureDecoded )
				{
					bNewPicture = true;
					bPreviousPictureDecoded = false;
				}
				else
				{
					bNewPicture = false;
				}
			}
			else
			{
				bNewPicture = m_cTDecTop.decode( nalu, m_iSkipFrame, m_iPOCLastDisplay );
				if( bNewPicture )
				{
					bitstreamFile.clear();
					/* location points to the current nalunit payload[1] due to the
					 * need for the annexB parser to read three extra bytes.
					 * [1] except for the first NAL unit in the file
					 *     (but bNewPicture doesn't happen then) */
					bitstreamFile.seekg( location - streamoff( 3 ) );
					bytestream.reset();
				}
				bPreviousPictureDecoded = true;
			}
		}
		if( bNewPicture || !bitstreamFile )
		{
			// run the in-loop filters
			m_cTDecTop.executeLoopFilters( poc, pcListPic );
		}

		if( pcListPic )
		{
			if( m_pchReconFile && !recon_opened )
			{
				if( !m_outputBitDepthY )
				{
					m_outputBitDepthY = g_bitDepthY;
				}
				if( !m_outputBitDepthC )
				{
					m_outputBitDepthC = g_bitDepthC;
				}

				m_cTVideoIOYuvReconFile.open( m_pchReconFile, true, m_outputBitDepthY, m_outputBitDepthC, g_bitDepthY, g_bitDepthC ); // write mode
				recon_opened = true;
			}
			if( bNewPicture )
			{
			/*	if ( nalu.m_nalUnitType == NAL_UNIT_CODED_SLICE_IDR
					|| nalu.m_nalUnitType == NAL_UNIT_CODED_SLICE_IDR_N_LP
					|| nalu.m_nalUnitType == NAL_UNIT_CODED_SLICE_BLA_N_LP
					|| nalu.m_nalUnitType == NAL_UNIT_CODED_SLICE_BLANT
					|| nalu.m_nalUnitType == NAL_UNIT_CODED_SLICE_BLA )
					xFlushOutput( pcListPic );*/
				//  write reconstruction to file
#if TEST_RECON
				xWriteOutput( pcListPic, nalu.m_temporalId );	// modified here to hand the newly decoded frame out for display in the UI
#else
				return last_tId = nalu.m_temporalId;
#endif
			}
		}
	}
	return -1;
}

inline byte clip(const Int x)
{
	return x > 255 ? 255 : ( x < 0 ? 0 : x );
}

// Whether to use the fast YUV-to-RGB conversion (computes four pixels at a time)
#define USE_FAST_YUV_2_RGB 0

#if USE_FAST_YUV_2_RGB
// pcPicYuv := pcPic->getPicYuvRec()
// pRGB := start address of the RGB (BGRA) output buffer
static void NcDecoderYUV2RGB(TComPicYuv *pcPicYuv, byte *pRGB, int width, int height)
{
	if ( NULL == pRGB )
		return;
	Pel *y = pcPicYuv->getLumaAddr(),	// start of the Y plane of the reconstructed picture
		*u = pcPicYuv->getCbAddr(),		// start of the Cb plane
		*v = pcPicYuv->getCrAddr();		// start of the Cr plane
	UInt dRGB = width << 2;			// byte offset from one bmp pixel to the same column in the next row
	UInt dY = pcPicYuv->getStride();	// offset from a luma sample to the same column in the next row
	const Int dY2 = dY + 1, dRGB2 = dRGB + 4;
	UInt d2Y = 2 *dY - width;	// luma pointer correction applied after each pair of rows
	UInt dUV = pcPicYuv->getCStride() - width / 2;	// chroma pointer correction applied per chroma row
	Int dB = 0, dG = 0, dR = 0, i = 0, j = 0;
	Int Cb = 0, Cr = 0;
	height = height >> 1 ;	width = width >> 1 ;	// halve both row and column counts, since 2x2 blocks are processed
	for ( j = 0; j < height; j++ )
	{
		for ( i = 0; i < width; i++ )
		{
			// ARGB_8888: in memory the four bytes are B, G, R, A (verified; A must be 255 for opaque)
			// a fast mode using shifting.
			Cb = *u - 128;
			Cr = *v - 128;
			dB = Cb + (Cb>>1) + (Cb>>2) + (Cb>>6);
			dG = - ((Cb>>2) + (Cb>>4) + (Cb>>5) ) - ((Cr>>1) + (Cr>>3) + (Cr>>4) + (Cr>>5));
			dR = Cr + (Cr>>2) + (Cr>>3) + (Cr>>5);
#if 0
			*(  pRGB) = clip(*y + dB);		// B
			*(pRGB+ 4 ) = clip(*(y+1) + dB);
			*(pRGB+dRGB) = clip(*(y+dY) + dB);
			*(pRGB+dRGB2) = clip(*(y+dY2) + dB);
			*(++pRGB) = clip(*y + dG);		// G
			*(pRGB+ 4 ) = clip(*(y+1) + dG);
			*(pRGB+dRGB) = clip(*(y+dY) + dG);
			*(pRGB+dRGB2) = clip(*(y+dY2) + dG);
			*(++pRGB) = clip(*y + dR);		// R
			*(pRGB+ 4 ) = clip(*(y+1) + dR);
			*(pRGB+dRGB) = clip(*(y+dY) + dR);
			*(pRGB+dRGB2) = clip(*(y+dY2) + dR);
#else
			Pel y0 = *y, y1 = *(y+1);
			Pel y2 = *(y+dY), y3 = *(y+dY2);
			*(  pRGB) = clip(y0 + dB);		// B
			*(pRGB+ 4 ) = clip(y1 + dB);
			*(pRGB+dRGB) = clip(y2 + dB);
			*(pRGB+dRGB2) = clip(y3 + dB);
			*(++pRGB) = clip(y0 + dG);		// G
			*(pRGB+ 4 ) = clip(y1 + dG);
			*(pRGB+dRGB) = clip(y2 + dG);
			*(pRGB+dRGB2) = clip(y3 + dG);
			*(++pRGB) = clip(y0 + dR);		// R
			*(pRGB+ 4 ) = clip(y1 + dR);
			*(pRGB+dRGB) = clip(y2 + dR);
			*(pRGB+dRGB2) = clip(y3 + dR);
#endif
			pRGB += 6;	// two BGRA pixels were written; together with the two ++pRGB above this advances the pointer 8 bytes
			// A was already set when the buffer was initialized, so it is not written here (2013-03-24)
			y+=2;
			u++;	v++;	// chroma advances only every second luma column, since each (Cb, Cr) pair covers a 2x2 block
		}	// for loop (i < width)
		pRGB += dRGB;	// skip one output row, because two picture rows were processed above (4 pixels per step)
		y += d2Y;		// correct the luma pointer (two rows consumed)
		u += dUV;		// correct the Cb pointer
		v += dUV;		// correct the Cr pointer
	}	// for loop (j < height)
}

#else

static void NcDecoderYUV2RGB(TComPicYuv *pcPicYuv, byte *pRGB, int width, int height)
{
	if ( NULL == pRGB )
		return;
	Pel *y = pcPicYuv->getLumaAddr(),	// start of the Y plane of the reconstructed picture
		*u = pcPicYuv->getCbAddr(),		// start of the Cb plane
		*v = pcPicYuv->getCrAddr();		// start of the Cr plane
//	byte *pRGB = (byte *)lpDiBits;		// start of the bmp pixel data
//	UInt *p = lpDiBits;
	UInt dy = pcPicYuv->getStride();		// offset from a luma sample to the same column in the next row
	UInt dUV = (pcPicYuv->getCStride() - width / 2);	// chroma pointer correction applied per chroma row
	for ( int j = 0; j < height; j++ )
	{
		for ( int i = 0; i < width; i+=2 )
		{
			// ARGB_8888: in memory the four bytes are B, G, R, A (verified; A must be 255 for opaque)
#if 1
			int dB = 1.772 * ( *u - 128 );
			int dG = -0.34413*(*u-128) - 0.71414*(*v-128);
			int dR = 1.402*(*v-128);
			*pRGB++ = clip(*y + 1.772 * ( *u - 128 ));	// B
			*pRGB++ = clip(*y -0.34413*(*u-128) - 0.71414*(*v-128));	// G
			*pRGB++ = clip(*y + 8 + 1.402*(*v-128));	// R
			pRGB++;	// A: already 255 from buffer initialization, just skip the byte
			y++;
			*pRGB++ = clip(*y + 1.772 * ( *u - 128 ));	// B
			*pRGB++ = clip(*y -0.34413*(*u-128) - 0.71414*(*v-128));	// G
			*pRGB++ = clip(*y + 8 + 1.402*(*v-128));	// R
			pRGB++;	// A: already 255 from buffer initialization, just skip the byte
#else
			// The branch below was only used to test which byte maps to which color; verified on 2013-03-14 that, reading from low to high addresses, the order is B, G, R, A
			*pRGB++ = 255;	*pRGB++ = 0;	*pRGB++ = 0;	*pRGB++ = 128;		y++;
			*pRGB++ = 255;	*pRGB++ = 0;	*pRGB++ = 0;	*pRGB++ = 128;
	/*		// Processing 32 bits at a time actually turned out to be slower
			*p++ = ((UInt( *y + 1.772 * *u - 0xE2) & 0xff) << 0)	// B
				 | ((UInt( *y - 0.34413 * *u - 0.71414 * *v + 0x87) & 0xff) << 8)	// G
				 | ((UInt( *y + 1.402 * *v  - 0xAB) & 0xff) << 16 )	// R
				 | 0xff000000;
			y++;
			*p++ = ((UInt( *y + 1.772 * *u - 0xE2) & 0xff) << 0)	// B
				 | ((UInt( *y - 0.34413 * *u - 0.71414 * *v + 0x87) & 0xff) << 8)	// G
				 | ((UInt( *y + 1.402 * *v - 0xAB) & 0xff) << 16 )	// R
				 | 0xff000000;
*/				//		*p++ = 0xff0000ff;	y++;	*p++ = 0xff0000ff;
#endif
			y++;
			u++;	v++;	// chroma advances only every second luma column, since each (Cb, Cr) pair covers a 2x2 block
		}
		y += ( pcPicYuv->getStride() - width);	// correct the luma pointer to the start of the next row
		if ( j % 2 == 0 )
		{
			u -= width / 2;		v -= width / 2;		// even rows: rewind chroma so the same chroma row is reused
		}
		else
		{
			// chroma only moves on after odd rows (0-based), i.e. when the next 2-row block starts
			u += dUV;	// correct the Cb pointer
			v += dUV;	// correct the Cr pointer
		}
	}	// for loop (j < height)
}
#endif
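A quick note on the fast path above: the shift sums are fixed-point approximations of the floating-point coefficients, which removes all multiplications, and the resulting dB/dG/dR offsets are reused for all four luma samples of a 2x2 block:

	1 + 1/2 + 1/4 + 1/64    = 1.765625 ≈ 1.772    (Cb term of B)
	1/4 + 1/16 + 1/32       = 0.34375  ≈ 0.34413  (Cb term of G)
	1/2 + 1/8 + 1/16 + 1/32 = 0.71875  ≈ 0.71414  (Cr term of G)
	1 + 1/4 + 1/8 + 1/32    = 1.40625  ≈ 1.402    (Cr term of R)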

LPVOID CNcMHevcDecoder::GetNextFrame(void)
{
	UInt tId = DecodeFrames();
	last_tId = -2;
	TComList<TComPic*>::iterator iterPic   = pcListPic->begin();
	Int not_displayed = 0;

	while (iterPic != pcListPic->end())
	{
		TComPic* pcPic = *(iterPic);
		if(pcPic->getOutputMark() && pcPic->getPOC() > m_iPOCLastDisplay)
			not_displayed++;
		iterPic++;
	}
	if ( 0 == not_displayed )
	{
		while ( -1 != tId && 0 == not_displayed )
		{
			tId = DecodeFrames();
			iterPic = pcListPic->begin();
			while ( not_displayed == 0 && iterPic != pcListPic->end())
			{
				TComPic* pcPic = *(iterPic);
				if(pcPic->getOutputMark() && pcPic->getPOC() > m_iPOCLastDisplay)
					not_displayed++;
				iterPic++;
			}
		}
		if ( 0 == not_displayed )
			return NULL;
	}
	while ( 1 )
	{
	iterPic   = pcListPic->begin();
	while (iterPic != pcListPic->end())	// if -> while 2013-3-28
	{
		TComPic* pcPic = *(iterPic);
		if ( /*pcPic->getOutputMark() &&*/ (not_displayed >  pcPic->getNumReorderPics(tId) && pcPic->getPOC() > m_iPOCLastDisplay))
		{
			// write to file
			not_displayed--;
			if ( NULL == lpDiBits )
			{
				width = pcPic->getPicYuvRec()->getWidth();
				height = pcPic->getPicYuvRec()->getHeight();
				lpDiBits = new char[width * height * 4];		// video width is normally a multiple of 4, so DIB row padding is not a concern
				memset( lpDiBits, 0xff, width * height * 4 );	// pre-fill; this also sets every alpha byte to 255
			}
			assert( lpDiBits != NULL );
			NcDecoderYUV2RGB( pcPic->getPicYuvRec(), (byte *)lpDiBits, width, height);
			/*
			if ( m_pchReconFile )
			{
				CroppingWindow &crop = pcPic->getCroppingWindow();
				m_cTVideoIOYuvReconFile.write( pcPic->getPicYuvRec(), crop.getPicCropLeftOffset(), crop.getPicCropRightOffset(), crop.getPicCropTopOffset(), crop.getPicCropBottomOffset() );
			}

			// update POC of display order
			*/
			m_iPOCLastDisplay = pcPic->getPOC();
			// erase non-referenced picture in the reference picture list after display
			pcPic->setOutputMark(false);
			return lpDiBits;
		}
		if ( !pcPic->getSlice(0)->isReferenced() && pcPic->getOutputMark() == false /*&& pcPic->getReconMark() == true*/ )
		{
#if !!DYN_REF_FREE
			pcPic->setReconMark(false);

			// mark it should be extended later
			pcPic->getPicYuvRec()->setBorderExtension( false );
#else
			pcPic->destroy();
			pcListPic->erase( iterPic );
//				iterPic = pcListPic->begin(); // to the beginning, non-efficient way, have to be revised!
#endif
			iterPic = pcListPic->begin();
		}
		else
			iterPic++;
	}
	tId = DecodeFrames();
	last_tId = -2;
	}	// 2013-3-28 
	return ((tId != -1) ? lpDiBits : NULL);
}
/*

UINT CNcMHevcDecoder::ThreadDecode( LPVOID lpVoid )
{
	CNcMHevcDecoder *pDecoder = ( CNcMHevcDecoder * )lpVoid;
	pDecoder->DecodeFrames();
	return 0;
}
*/
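For completeness, this is roughly how the decoder class can be driven from an MFC window. The dialog class, member names and timer usage below are only an illustration of the calling side, not code from the actual player.

// Sketch of the calling side (hypothetical CPlayerDlg with members
// CNcMHevcDecoder m_decoder; BITMAPINFO m_bmpInfo; LPVOID m_pFrame;).

void CPlayerDlg::StartPlayback( LPCTSTR lpszFile )
{
	if ( !m_decoder.OpenFile( lpszFile, m_bmpInfo ) )
	{
		AfxMessageBox( _T("Failed to open the HEVC bitstream") );
		return;
	}
	SetTimer( 1, 40, NULL );	// ~25 fps; a real player would use the stream's frame rate
}

void CPlayerDlg::OnTimer( UINT_PTR nIDEvent )
{
	m_pFrame = m_decoder.GetNextFrame();	// BGRA buffer, or NULL at end of stream
	if ( m_pFrame == NULL )
		KillTimer( 1 );
	else
		Invalidate( FALSE );
	CDialog::OnTimer( nIDEvent );
}

void CPlayerDlg::OnPaint()
{
	CPaintDC dc( this );
	if ( m_pFrame == NULL )
		return;
	CRect rc;
	GetClientRect( &rc );
	// biHeight was set negative in OpenFile(), so the top-down BGRA buffer can be drawn directly.
	StretchDIBits( dc.GetSafeHdc(),
	               0, 0, rc.Width(), rc.Height(),
	               0, 0, m_bmpInfo.bmiHeader.biWidth, -m_bmpInfo.bmiHeader.biHeight,
	               m_pFrame, &m_bmpInfo, DIB_RGB_COLORS, SRCCOPY );
}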
PS: You are welcome to visit my CSDN blog: http://blog.csdn.net/luofl1992
