Monday, March 23, 2015

Issues with Kanzi video capture on QNX

I originally wanted to use Kanzi's memory management to allocate the capture buffers, following the vcapture sample. The image format is SCREEN_FORMAT_YUY2.

The buffer size should therefore be width * height * 2.

Doing it that way, however, kept producing random crashes or abnormal green edges. I then switched to plain malloc, but that wasn't right either: the image jittered badly.


In the end I had to let Screen create the buffers itself to get a stable result:

rc = screen_get_buffer_property_pv(video->screen_buf[i], SCREEN_PROPERTY_POINTER, &(video->pointers[i]));

My guess is that the captured data contains more than just the YUY2 payload; there is probably some extra padding, so the real buffer size is not exactly width * height * 2.
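
For reference, this is roughly what the Screen-managed setup looks like (my own reconstruction, not the exact project code): set the window format and buffer size, let Screen allocate the buffers, then query back the CPU pointer and the actual stride. The function name and buffer count are illustrative.

#include <screen/screen.h>

#define NUM_BUFFERS 3                       /* illustrative buffer count */

/* Let Screen allocate the YUY2 buffers instead of malloc'ing width*height*2
 * by hand.  "win" is an already-created screen_window_t; error checking is
 * omitted for brevity. */
static int setup_capture_buffers(screen_window_t win, int width, int height,
                                 screen_buffer_t bufs[NUM_BUFFERS],
                                 void *pointers[NUM_BUFFERS])
{
    int format = SCREEN_FORMAT_YUY2;
    int size[2] = { width, height };
    int stride = 0;
    int i;

    screen_set_window_property_iv(win, SCREEN_PROPERTY_FORMAT, &format);
    screen_set_window_property_iv(win, SCREEN_PROPERTY_BUFFER_SIZE, size);
    screen_create_window_buffers(win, NUM_BUFFERS);

    screen_get_window_property_pv(win, SCREEN_PROPERTY_RENDER_BUFFERS, (void **)bufs);
    for (i = 0; i < NUM_BUFFERS; ++i) {
        /* CPU-visible pointer into the Screen-owned buffer */
        screen_get_buffer_property_pv(bufs[i], SCREEN_PROPERTY_POINTER, &pointers[i]);
    }

    /* The real line pitch can be larger than width*2 (alignment/padding),
     * so a hand-sized malloc buffer ends up with the wrong layout. */
    screen_get_buffer_property_iv(bufs[0], SCREEN_PROPERTY_STRIDE, &stride);
    return stride;
}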


Even with that fixed, the picture still jittered at a regular rate, and the log kept reporting dropped frames.

After a lot of cross-testing I found the cause: my software YUY2-to-RGB24 conversion was too slow, and that was what caused the jitter.

So the conversion had to move into a shader.

Each frame is no longer decoded in software; the raw frame data is handed straight to the shader. The frame getter now simply returns the raw pointer:

return (kzByte*)video->pointers[buf_idx];

Since the format is YUY2, I happened to find an NVIDIA demo that provides a conversion shader.

First, create a texture in Kanzi with the format KZU_TEXTURE_CHANNELS_LUMINANCE_ALPHA:
result = kzuSharedImageTextureCreate(kzuUIDomainGetResourceManager(kzuObjectNodeGetUIDomain(layerNode)), "video texture",
KZU_TEXTURE_CHANNELS_LUMINANCE_ALPHA, VideoCaptureGetWidth(video), VideoCaptureGetHeight(video), KZ_NULL,
KZ_NULL, KZ_FALSE, &videoCapturePlayer->texture);
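
The reason LUMINANCE_ALPHA fits: each YUY2 pixel is exactly 2 bytes, so the raw capture buffer can be uploaded unchanged, one L/A texel per pixel. My understanding of the mapping (and the reason the shader below samples two neighbouring texels) is:

/* YUY2 byte layout: 4 bytes describe 2 pixels
 *     [Y0][U][Y1][V]
 * Uploaded as-is into a LUMINANCE_ALPHA texture (2 bytes per texel):
 *     even texel: L = Y0, A = U
 *     odd  texel: L = Y1, A = V
 * So the texture is VideoCaptureGetWidth(video) texels wide, and the chroma
 * of a pixel pair is split across two neighbouring texels -- which is why
 * the fragment shader has to fetch both uv0 and uv1 to rebuild (Y, U, V). */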

On each update, write the frame data into it:
result = kzuSharedImageTextureUpdate(videoCapturePlayer->texture, data, videoWidth * videoHeight * 2);
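
Putting it together, the per-frame path looks roughly like this sketch. VideoCaptureLockFrame() is a hypothetical name for the helper that returns (kzByte*)video->pointers[buf_idx] as shown earlier; the rest are the calls already mentioned.

/* Per-frame update (sketch): no software YUY2 decode, just hand the raw
 * capture buffer to the shared texture. */
kzUint videoWidth  = VideoCaptureGetWidth(video);
kzUint videoHeight = VideoCaptureGetHeight(video);
kzByte* frameData  = VideoCaptureLockFrame(video);   /* hypothetical wrapper */

/* 2 bytes per pixel: one LUMINANCE_ALPHA texel per YUY2 pixel */
result = kzuSharedImageTextureUpdate(videoCapturePlayer->texture, frameData,
                                     videoWidth * videoHeight * 2);
kzsErrorForward(result);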


uniform sampler2D Texture;
uniform sampler2D TextureMask;
uniform lowp float BlendIntensity;
uniform lowp vec4 Ambient;
varying mediump vec2 vTexCoord;
varying highp vec2 vScreenPos;

// CCIR 601 standard
const mediump vec3 std601R = vec3(1.0, -0.00092674,  1.4017);
const mediump vec3 std601G = vec3(1.0, -0.3437,     -0.71417);
const mediump vec3 std601B = vec3(1.0,  1.7722,      0.00099022);
const mediump vec4 stdbias = vec4(0.0, -0.5, -0.5, 0.0);

void main()
{
    precision mediump float;
    vec2 uv0, uv1;
    float SrcTexWidth = 720.0;                // source frame width in pixels (hard-coded)
    float texel_sample = 1.0 / SrcTexWidth;   // one texel step in texture coordinates

    // 0.0 for even texel columns, 1.0 for odd ones
    float isOddUV = fract(floor(vTexCoord.x * SrcTexWidth) * 0.5) * 2.0;

    // If the (x,y) address is ODD,  we also need the (x-1,y) sample to decode it.
    // If the (x,y) address is EVEN, we also need the (x+1,y) sample to decode it.
    uv0 = vTexCoord;
    uv1 = vTexCoord;
    uv0.x = vTexCoord.x - (isOddUV * texel_sample);  // always an even texel: carries Y0 and U
    uv1.x = uv0.x + texel_sample;                    // the odd texel next to it: carries Y1 and V

    // sample the neighbouring texels
    vec4 texColor0 = texture2D(Texture, uv0);
    vec4 texColor1 = texture2D(Texture, uv1);
    vec4 mask = texture2D(TextureMask, vScreenPos);

    // For A8L8 (LUMINANCE_ALPHA): luminance -> rgb, chroma byte -> alpha
    texColor0.r = texColor0.r; // Y0 (already in place)
    texColor0.g = texColor0.a; // U0
    texColor0.b = texColor1.a; // V0

    texColor1.r = texColor1.r; // Y1 (already in place)
    texColor1.g = texColor0.a; // U0
    texColor1.b = texColor1.a; // V0

    // keep only the sample matching this fragment's column parity
    texColor0 += stdbias;
    texColor0 *= (1.0 - isOddUV);

    texColor1 += stdbias;
    texColor1 *= isOddUV;

    texColor0 = texColor0 + texColor1;

    // YCbCr -> RGB: both chroma terms are subtracted in the G row
    vec4 color = vec4(texColor0.r + 1.37075  * texColor0.b,
                      texColor0.r - 0.698001 * texColor0.b - 0.337633 * texColor0.g,
                      texColor0.r + 1.73246  * texColor0.g,
                      1.0);

    //vec4 color = vec4(dot(std601R, texColor0.rgb),
    //                  dot(std601G, texColor0.rgb),
    //                  dot(std601B, texColor0.rgb),
    //                  1.0);

    gl_FragColor.rgba = clamp(color.rgba, 0.0, 1.0); // * Ambient * BlendIntensity;
    gl_FragColor.a *= (1.0 - mask.a);
}

Still, the resulting colors look strange. I tried different conversion matrices and got the same result, so I ended up expanding the math and tweaking the coefficients by hand... My guess is that Kanzi applies gamma correction when writing into the shared texture. I can't find anywhere to turn it off, and I can't confirm whether it actually does it (the documentation doesn't say), so for now I just have to live with it...
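
To separate the conversion math from whatever Kanzi does on upload, a small CPU-side reference helps: decode one YUY2 pixel pair on the CPU with textbook BT.601-style coefficients and compare it with what the shader produces for the same pixels. This is only a debugging sketch; the helper names and the exact coefficient set are illustrative.

#include <stdint.h>

static uint8_t clamp8(float v)
{
    return v < 0.0f ? 0 : v > 255.0f ? 255 : (uint8_t)v;
}

/* Decode one YUY2 pixel pair [Y0][U][Y1][V] into two RGB pixels. */
static void yuy2_pair_to_rgb(const uint8_t yuy2[4], uint8_t rgb[6])
{
    float y0 = (float)yuy2[0];
    float u  = (float)yuy2[1] - 128.0f;
    float y1 = (float)yuy2[2];
    float v  = (float)yuy2[3] - 128.0f;

    rgb[0] = clamp8(y0 + 1.402f * v);                   /* R0 */
    rgb[1] = clamp8(y0 - 0.344f * u - 0.714f * v);      /* G0 */
    rgb[2] = clamp8(y0 + 1.772f * u);                   /* B0 */

    rgb[3] = clamp8(y1 + 1.402f * v);                   /* R1 */
    rgb[4] = clamp8(y1 - 0.344f * u - 0.714f * v);      /* G1 */
    rgb[5] = clamp8(y1 + 1.772f * u);                   /* B1 */
}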
