Monday, 26 January 2015

foreground and background subtraction

clear all;
close all;
clc;
threshold = 0.05;
file = 'Video.avi';
vfr = vision.VideoFileReader(file,'ImageColorSpace','RGB');
%vfr = vision.VideoFileReader(file, 'ImageColorSpace', 'gray');
imagebox = step(vfr);
backgnd = imagebox;
g_img = rgb2gray(backgnd);
imagesize = size(backgnd);
width = imagesize(2);
height = imagesize(1);
foregnd = zeros(height, width);
while ~isDone(vfr)
    imagebox1 = step(vfr);
    foregndgray = rgb2gray(imagebox1);
    foregnddiff = abs(double(foregndgray) - double(g_img));   % frame difference
    for i = 1:width
        for j = 1:height
            if foregnddiff(j,i) > threshold
                foregnd(j,i) = 255;
            else
                foregnd(j,i) = 0;
            end
        end
    end
    g_img = foregndgray;   % current frame becomes the new background
    figure(1), subplot(3,1,1), imshow(imagebox1)
    subplot(3,1,2), imshow(foregndgray)
    subplot(3,1,3), imshow(uint8(foregnd))
end
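
The pixel-by-pixel loop above can be replaced by a single vectorised comparison. A minimal sketch of the same thresholding, assuming the vfr, g_img and threshold variables already set up above (re-create the reader first if the earlier loop has already run to the end of the file):

while ~isDone(vfr)
    frame   = step(vfr);
    grayNow = rgb2gray(frame);
    diffImg = abs(double(grayNow) - double(g_img));   % frame difference
    foregnd = uint8(diffImg > threshold) * 255;       % vectorised threshold
    g_img   = grayNow;                                % current frame becomes the background
    figure(1), imshow(foregnd)
end
release(vfr);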

locate points in an image

NumberOfimages = 4;                      % choose how many images to process
srcFiles = dir('E:\frames12\*.png');
for i = 1:NumberOfimages
    filename = strcat('E:\frames12\', srcFiles(i).name);
    I = imread(filename);
    faceDetector = vision.CascadeObjectDetector();
    fbox = step(faceDetector, I);
    if isempty(fbox), continue; end      % skip frames with no detected face
    videoOutFace = insertObjectAnnotation(I, 'rectangle', fbox, 'Face');
    %figure, imshow(videoOutFace), title('Detected face');
    e = imcrop(I, fbox(1,:));            % crop the first detected face
    %figure, imshow(e);
    imwrite(e, strcat('E:\frames13\', int2str(i), '.png'));
    imwrite(e, 'acquired.jpg');
    I = rgb2gray(e);                     % grayscale crop for corner detection
    %cornerDetector = vision.CornerDetector('Method','Minimum eigenvalue (Shi & Tomasi)','MaximumCornerCount',120);
    cornerDetector = vision.CornerDetector('Method','Local intensity comparison (Rosten & Drummond)','MaximumCornerCount',250);
    pts = step(cornerDetector, I);
    drawMarkers = vision.MarkerInserter('Shape','Plus','BorderColor','Custom','Size',2);
    J = repmat(I, [1 1 3]);
    J = step(drawMarkers, J, pts);
    %imshow(J); title('Corners detected in a grayscale image');
    imwrite(J, strcat('E:\frames12\frames\', int2str(i), '.png'));
end
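
On newer releases of the Computer Vision System Toolbox the corner-marking step can be written without the System objects. A minimal sketch, assuming a cropped face image e as produced in the loop above:

gray = rgb2gray(e);
pts  = detectFASTFeatures(gray);                     % FAST corners (Rosten & Drummond)
J2   = insertMarker(repmat(gray, [1 1 3]), pts.Location, 'plus', 'Size', 2);
figure, imshow(J2), title('FAST corners on the cropped face');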



Sunday, 7 December 2014

detect corners of a face

Iface = imread('fr56.png');
faceDetector = vision.CascadeObjectDetector('FrontalFaceCART');
Igray = rgb2gray(Iface);
bboxes = step(faceDetector, Igray);
corners = detectHarrisFeatures(Igray);
Iannotated = insertObjectAnnotation(Iface, 'rectangle', bboxes, 'Face');
figure, imshow(Iannotated), title('Detected faces'); hold on;
plot(corners.selectStrongest(50));
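
detectHarrisFeatures also accepts an ROI, so the corner search can be restricted to the detected face box. A short sketch, assuming the bboxes and Igray variables above and at least one detection:

faceROI = bboxes(1,:);                               % [x y width height] of the first face
faceCorners = detectHarrisFeatures(Igray, 'ROI', faceROI);
figure, imshow(Iface), title('Corners inside the face box'); hold on;
plot(faceCorners.selectStrongest(30));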

Tuesday, 14 October 2014

How to locate people in an image

%Create a people detector, and load the input image.
peopleDetector = vision.PeopleDetector;
I = imread('visionteam1.jpg');

%Detect people using the people detector object.
[bboxes, scores] = step(peopleDetector, I);

%Create a shape inserter and a score inserter.
shapeInserter = vision.ShapeInserter('BorderColor','Custom','CustomBorderColor',[255 255 0]);
scoreInserter = vision.TextInserter('Text',' %f','Color',[0 80 255],'LocationSource','Input port','FontSize',16);

%Draw detected bounding boxes, and insert scores using the inserter objects.
I = step(shapeInserter, I, int32(bboxes));
I = step(scoreInserter, I, scores, int32(bboxes(:,1:2)));
figure, imshow(I)
title('Detected people and detection scores');
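
The two inserter objects can also be collapsed into a single insertObjectAnnotation call, which takes the numeric scores directly as box labels. A minimal sketch, assuming the same peopleDetector:

I2 = imread('visionteam1.jpg');
[bboxes, scores] = step(peopleDetector, I2);
labeled = insertObjectAnnotation(I2, 'rectangle', bboxes, scores);   % scores become box labels
figure, imshow(labeled), title('Detected people and detection scores');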

Tuesday, 30 September 2014

find the distance transform of an image

a = imread('peppers.png');
bw = im2bw(a);
d1 = bwdist(bw, 'euclidean');   % Euclidean distance transform of the binary image
imshow(mat2gray(d1));
hold on, imcontour(d1);
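
bwdist supports other distance metrics as well; a short sketch comparing them on the same binary image bw:

metrics = {'euclidean','cityblock','chessboard','quasi-euclidean'};
figure;
for k = 1:numel(metrics)
    d = bwdist(bw, metrics{k});
    subplot(2,2,k), imshow(mat2gray(d)), title(metrics{k});
end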

Thursday, 28 August 2014

track an object using optical flow

vid = videoinput('winvideo', 1);
% Configure the number of frames to log upon triggering.
set(vid,'FramesPerTrigger', 1);
set(vid,'TriggerRepeat', Inf);
optical = vision.OpticalFlow( ...
    'OutputValue', 'Horizontal and vertical components in complex form');
imsize = get(vid,'VideoResolution');
shapes = vision.ShapeInserter;
shapes.Shape = 'Lines';
shapes.BorderColor = 'white';
row = 1:5:imsize(2);
column = 1:5:imsize(1);
[Cv, Rv] = meshgrid(column,row);
Rv = Rv(:)';
Cv = Cv(:)';
hVideoIn = vision.VideoPlayer;
hVideoIn.Name  = 'Original Video';
hVideoOut = vision.VideoPlayer;
hVideoOut.Name  = 'Motion Detected Video';
% Start the Image Acquisition device.
start(vid);

% Set up for stream
nFrames = 0;
while (nFrames<100)     % Process for the first 100 frames.
    % Acquire single frame of single data type.
    rgbData = getdata(vid, 1, 'single');

    % Compute the optical flow for that particular frame.
    optFlow = step(optical,rgb2gray(rgbData));

    % Downsample optical flow field.
    optFlow_DS = optFlow(row, column);
    H = imag(optFlow_DS)*50;
    V = real(optFlow_DS)*50;

    % Draw lines on top of image
    lines = [Rv; Cv; Rv+H(:)'; Cv+V(:)'];
    rgb_Out = step(shapes, rgbData, lines);

    % Send image data to video player
    % Display original video.
    step(hVideoIn, rgbData);
    % Display video along with motion vectors.
    step(hVideoOut, rgb_Out);

    % Increment frame count
    nFrames = nFrames + 1;
end

% Stop the acquisition device and clean up.
stop(vid);
delete(vid);
release(hVideoIn);
release(hVideoOut);
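
If no image acquisition device is connected, the same optical-flow overlay can be driven from a video file. A minimal sketch, assuming the viptraffic.avi clip that ships with the toolbox, and using quiver for the arrows instead of vision.ShapeInserter:

vfr2 = vision.VideoFileReader('viptraffic.avi');
optical2 = vision.OpticalFlow( ...
    'OutputValue', 'Horizontal and vertical components in complex form');
figure;
while ~isDone(vfr2)
    frame = step(vfr2);                            % single RGB frame in [0,1]
    of = step(optical2, rgb2gray(frame));          % complex flow: real = x, imag = y
    [h, w] = size(of);
    [X, Y] = meshgrid(1:10:w, 1:10:h);             % coarse grid for the arrows
    ofDS = of(1:10:h, 1:10:w);
    imshow(frame); hold on;
    quiver(X, Y, real(ofDS), imag(ofDS), 2, 'y');  % overlay the motion vectors
    hold off; drawnow;
end
release(vfr2);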

track a car using optical flow

hbfr = vision.BinaryFileReader( ...
        'Filename', 'viptraffic.bin');
hcr = vision.ChromaResampler(...
  'Resampling', '4:2:0 (MPEG1) to 4:4:4', ...
  'InterpolationFilter', 'Pixel replication');
hcsc1 = vision.ColorSpaceConverter('Conversion', 'YCbCr to RGB');
hcsc2 = vision.ColorSpaceConverter('Conversion', 'RGB to intensity');
idtc = vision.ImageDataTypeConverter('OutputDataType', 'single');
hof = vision.OpticalFlow( ...
    'OutputValue', 'Horizontal and vertical components in complex form', ...
    'ReferenceFrameDelay', 3);
hmean1 = vision.Mean;
hmean2 = vision.Mean('RunningMean', true);
hmedianfilt = vision.MedianFilter;
hclose = vision.MorphologicalClose('Neighborhood', strel('line',5,45));
hblob = vision.BlobAnalysis( ...
                    'CentroidOutputPort', false, ...
                    'AreaOutputPort', true, ...
                    'BoundingBoxOutputPort', true, ...
                    'OutputDataType', 'double', ...
                    'NumBlobsOutputPort',  false, ...
                    'MinimumBlobAreaSource', 'Property', ...
                    'MinimumBlobArea', 250, ...
                    'MaximumBlobAreaSource', 'Property', ...
                    'MaximumBlobArea', 3600, ...
                    'FillValues', -1, ...
                    'MaximumCount', 80);
herode = vision.MorphologicalErode('Neighborhood', strel('square',2));
hshapeins1 = vision.ShapeInserter( ...
            'BorderColor', 'Custom', ...
            'CustomBorderColor', [0 1 0]);
hshapeins2 = vision.ShapeInserter( ...
            'Shape','Lines', ...
            'BorderColor', 'Custom', ...
            'CustomBorderColor', [255 255 0]);
htextins = vision.TextInserter( ...
        'Text', '%4d', ...
        'Location',  [0 0], ...
        'Color', [1 1 1], ...
        'FontSize', 12);
sz = get(0,'ScreenSize');
pos = [20 sz(4)-300 200 200];
hVideo1 = vision.VideoPlayer('Name','Original Video','Position',pos);
pos(1) = pos(1)+220; % move the next viewer to the right
hVideo2 = vision.VideoPlayer('Name','Motion Vector','Position',pos);
pos(1) = pos(1)+220;
hVideo3 = vision.VideoPlayer('Name','Thresholded Video','Position',pos);
pos(1) = pos(1)+220;
hVideo4 = vision.VideoPlayer('Name','Results','Position',pos);

% Initialize some variables used in plotting motion vectors.
MotionVecGain = 20;
line_row =  22;
borderOffset   = 5;
decimFactorRow = 5;
decimFactorCol = 5;
firstTime = true;
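
The listing above ends with the setup. Below is a minimal, hypothetical sketch of a processing loop that chains these objects together; it is not the exact MathWorks demo loop: the motion-vector overlay (hshapeins2, MotionVecGain, line_row and the decimation variables) is left out, and the adaptive thresholding shown is only one reasonable choice.

while ~isDone(hbfr)
    [y, cb, cr] = step(hbfr);                  % read one YCbCr 4:2:0 frame
    [cb, cr]    = step(hcr, cb, cr);           % upsample chroma to 4:4:4
    rgb  = step(hcsc1, cat(3, y, cb, cr));     % RGB frame for display
    gray = step(idtc, step(hcsc2, rgb));       % intensity frame as single

    of = step(hof, gray);                      % complex optical flow field

    % Smooth the squared flow magnitude and derive an adaptive threshold:
    % hmean1 averages over the frame, hmean2 keeps a running mean across frames.
    magSq = step(hmedianfilt, abs(of).^2);
    velTh = 0.5 * step(hmean2, step(hmean1, magSq));

    % Segment moving regions and clean the mask up morphologically.
    mask = step(hclose, step(herode, magSq >= velTh));

    % Bounding boxes of blobs within the configured area limits, plus a count.
    [area, bbox] = step(hblob, mask);
    count = int32(size(bbox, 1));

    out = step(hshapeins1, rgb, int32(bbox));  % green boxes around the cars
    out = step(htextins, out, count);          % car count in the top-left corner

    step(hVideo1, rgb);                        % original video
    step(hVideo2, gray);                       % (motion-vector overlay omitted here)
    step(hVideo3, mask);                       % thresholded video
    step(hVideo4, out);                        % results
end
release(hbfr);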