//#include "opencv2/imgproc.hpp"
//#include "opencv2/imgcodecs.hpp"
//#include "opencv2/highgui.hpp"
//#include <iostream>
//using namespace cv;
//using std::cout;
//int threshold_value = 0;
//int threshold_type = 3;
//int const max_value = 255;
//int const max_type = 4;
//int const max_binary_value = 255;
//Mat src, src_gray, dst;
//const char* window_name = "Threshold Demo";
//const char* trackbar_type = "Type: \n 0: Binary \n 1: Binary Inverted \n 2: Truncate \n 3: To Zero \n 4: To Zero Inverted";
//const char* trackbar_value = "Value";
//static void Threshold_Demo( int, void* )
//{
//    /* 0: Binary
//       1: Binary Inverted
//       2: Threshold Truncated
//       3: Threshold to Zero
//       4: Threshold to Zero Inverted
//     */
//    threshold( src_gray, dst, threshold_value, max_binary_value, threshold_type );
//    imshow( window_name, dst );
//}
//int main( int argc, char** argv )
//{
//    String imageName("C:\\Documents\\2.png"); // by default
//    if (argc > 1)
//    {
//        imageName = argv[1];
//    }
//    src = imread( samples::findFile( imageName ), IMREAD_COLOR ); // Load an image
//    if (src.empty())
//    {
//        cout << "Cannot read the image: " << imageName << std::endl;
//        return -1;
//    }
//    cvtColor( src, src_gray, COLOR_BGR2GRAY ); // Convert the image to Gray
//    namedWindow( window_name, WINDOW_AUTOSIZE ); // Create a window to display results
//    createTrackbar( trackbar_type,
//                    window_name, &threshold_type,
//                    max_type, Threshold_Demo ); // Create a Trackbar to choose type of Threshold
//    createTrackbar( trackbar_value,
//                    window_name, &threshold_value,
//                    max_value, Threshold_Demo ); // Create a Trackbar to choose Threshold value
//    Threshold_Demo( 0, 0 ); // Call the function to initialize
//    waitKey();
//    return 0;
//}
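// For reference, a small sketch (values from OpenCV's ThresholdTypes enum): the raw
// trackbar integer passed as `threshold_type` above maps directly onto these constants,
// which is why the callback can hand it straight to cv::threshold().
//static const int kThresholdTypes[5] = {
//    cv::THRESH_BINARY,      // 0
//    cv::THRESH_BINARY_INV,  // 1
//    cv::THRESH_TRUNC,       // 2
//    cv::THRESH_TOZERO,      // 3
//    cv::THRESH_TOZERO_INV   // 4
//};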
// Active program: builds a BGRA image whose alpha channel comes from an
// Otsu-thresholded mask and writes it out as a transparent PNG.
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;
int main()
{
    Mat src = imread("C:\\Documents\\1.png", IMREAD_COLOR);
    if (src.empty())
    {
        cout << "Cannot read the image" << endl;
        return -1;
    }
    Mat dst;
    Mat tmp, alpha;
    cvtColor(src, tmp, COLOR_BGR2GRAY);            // grayscale copy used to build the mask
    // With THRESH_OTSU the fixed threshold (254) is ignored; Otsu picks the value automatically.
    threshold(tmp, alpha, 254, 255, THRESH_BINARY_INV | THRESH_OTSU);
    Mat rgb[3];
    split(src, rgb);                               // channels come out in B, G, R order
    Mat rgba[4] = { rgb[0], rgb[1], rgb[2], alpha }; // append the mask as the alpha channel
    merge(rgba, 4, dst);
    imwrite("C:\\Documents\\alpha.png", alpha);    // save the alpha mask
    imwrite("C:\\Documents\\dst9.png", dst);       // save the 4-channel (BGRA) result
    return 0;
}
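// A minimal, optional sketch (assumes the program above has already written dst9.png):
// read the file back with IMREAD_UNCHANGED and confirm the alpha channel survived,
// since imread's default flag would silently convert it to 3-channel BGR.
//#include <opencv2/opencv.hpp>
//#include <iostream>
//int main()
//{
//    cv::Mat check = cv::imread("C:\\Documents\\dst9.png", cv::IMREAD_UNCHANGED);
//    if (check.empty())
//    {
//        std::cout << "Cannot read the image" << std::endl;
//        return -1;
//    }
//    std::cout << "channels: " << check.channels() << std::endl; // expect 4 (BGRA)
//    return 0;
//}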
//#include <opencv2/imgproc.hpp>
//#include <opencv2/highgui.hpp>
//#include <opencv2/opencv.hpp>
//#include <vector>
//#include <algorithm>
//#include <tuple>
//#include <iostream>
//using namespace cv;
//struct comparator{
//    bool operator() (std::tuple<std::vector<cv::Point>, bool, double> t1,
//                     std::tuple<std::vector<cv::Point>, bool, double> t2) {
//        return std::get<2>(t1) > std::get<2>(t2);
//    }
//} comparator;
//int main(int, char**)
//{
//    // get image
//    cv::Mat image = cv::imread("C:\\Documents\\2.png");
//    cv::Mat grayImg;
//    // convert to greyscale
//    cv::cvtColor(image, grayImg, COLOR_BGR2GRAY); // imread above returns 3-channel BGR, so BGR2GRAY is the matching conversion
////    cv::Mat canny;
////    cv::Canny(grayImg, canny, 120, 255, 3);
//    // finding threshes
//    cv::Mat thresh;
//    cv::threshold(grayImg, thresh, 100, 255, THRESH_BINARY_INV | THRESH_OTSU);
//    // finding contours
//    std::vector<std::vector<cv::Point>> contours;
//    std::vector<cv::Vec4i> hierarchy;
//    findContours( thresh, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, Point(0, 0) );
//    // finding max contour
//    std::vector<std::tuple<std::vector<cv::Point>, bool, double>> vec;
//    for(size_t i = 0; i < contours.size(); ++i){
//        vec.push_back(std::make_tuple(contours.at(i), cv::isContourConvex(contours.at(i)), cv::contourArea(contours.at(i))));
//    }
//    std::sort(vec.begin(), vec.end(), comparator);
//    std::tuple<std::vector<cv::Point>, bool, double> maxContour;
//    maxContour = vec.at(0);
//    // create mask
//    cv::Mat mask = Mat::zeros(thresh.size(), CV_8U);
////    for(size_t i = 0; i < contours.size(); ++i){
////        cv::fillConvexPoly(mask, std::get<0>(vec.at(i)), Scalar(255,0,0), 8, 0);
////    }
//    cv::fillConvexPoly(mask, std::get<0>(maxContour), Scalar(255,0,0), 8, 0);
//    // bitwise
//    cv::Mat res;
//    cv::bitwise_and(image, image, res, mask);
//    // show process
//    imshow("result", res);
//    imshow("mask", mask);
//    imshow("canny", thresh);
//    imshow("source", image);
//    // create transparent background
//    Mat dst;
//    Mat rgb[3];
//    split(image, rgb);
//    Mat rgba[4] = {rgb[0], rgb[1], rgb[2], thresh};
//    merge(rgba, 4, dst);
//    // save to file transparent and cropped images
//    imwrite("C:/Documents/21.png", res);
//    imwrite("C:/Documents/dst.png", dst);
//    imwrite("C:/Documents/mask.png", mask);
//    imwrite("C:/Documents/thresh.png", thresh);
//    imwrite("C:/Documents/src.png", image);
//    while (true) {
//        if (waitKey() == 27) { // wait until the Esc key is pressed, then break the loop
//            std::cout << "esc key is pressed by user";
//            break;
//        }
//    }
//    return 0;
//}
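// A minimal sketch of an alternative to the sorted tuple vector above: pick the largest
// contour directly with std::max_element and cv::contourArea (assumes the same `contours`
// vector produced by findContours; the helper name below is hypothetical).
//#include <opencv2/imgproc.hpp>
//#include <algorithm>
//#include <vector>
//static std::vector<cv::Point> largestContour(const std::vector<std::vector<cv::Point>>& contours)
//{
//    auto it = std::max_element(contours.begin(), contours.end(),
//        [](const std::vector<cv::Point>& a, const std::vector<cv::Point>& b) {
//            return cv::contourArea(a) < cv::contourArea(b);
//        });
//    return it == contours.end() ? std::vector<cv::Point>() : *it;
//}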
//#include "opencv2/objdetect.hpp"
//#include "opencv2/highgui.hpp"
//#include "opencv2/imgproc.hpp"
//#include "opencv2/videoio.hpp"
//#include <iostream>
//using namespace std;
//using namespace cv;
//void detectAndDisplay( Mat frame );
//CascadeClassifier face_cascade;
//CascadeClassifier eyes_cascade;
//int main( int argc, const char** argv )
//{
//    CommandLineParser parser(argc, argv,
//                             "{help h||}"
//                             "{face_cascade|data/haarcascades/haarcascade_frontalface_alt.xml|Path to face cascade.}"
//                             "{eyes_cascade|data/haarcascades/haarcascade_eye_tree_eyeglasses.xml|Path to eyes cascade.}"
//                             "{camera|0|Camera device number.}");
//    parser.about( "\nThis program demonstrates using the cv::CascadeClassifier class to detect objects (Face + eyes) in a video stream.\n"
//                  "You can use Haar or LBP features.\n\n" );
//    parser.printMessage();
//    String face_cascade_name = samples::findFile( parser.get<String>("face_cascade") );
//    String eyes_cascade_name = samples::findFile( parser.get<String>("eyes_cascade") );
//    //-- 1. Load the cascades
//    if( !face_cascade.load( face_cascade_name ) )
//    {
//        cout << "--(!)Error loading face cascade\n";
//        return -1;
//    };
//    if( !eyes_cascade.load( eyes_cascade_name ) )
//    {
//        cout << "--(!)Error loading eyes cascade\n";
//        return -1;
//    };
//    int camera_device = parser.get<int>("camera");
//    VideoCapture capture;
//    //-- 2. Read the video stream
//    capture.open( camera_device );
//    if ( ! capture.isOpened() )
//    {
//        cout << "--(!)Error opening video capture\n";
//        return -1;
//    }
//    Mat frame;
//    while ( capture.read(frame) )
//    {
//        if( frame.empty() )
//        {
//            cout << "--(!) No captured frame -- Break!\n";
//            break;
//        }
//        //-- 3. Apply the classifier to the frame
//        detectAndDisplay( frame );
//        if( waitKey(10) == 27 )
//        {
//            break; // escape
//        }
//    }
//    return 0;
//}
//void detectAndDisplay( Mat frame )
//{
//    Mat frame_gray;
//    cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
//    equalizeHist( frame_gray, frame_gray );
//    //-- Detect faces
//    std::vector<Rect> faces;
//    face_cascade.detectMultiScale( frame_gray, faces );
//    for ( size_t i = 0; i < faces.size(); i++ )
//    {
//        Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
//        ellipse( frame, center, Size( faces[i].width/2, faces[i].height/2 ), 0, 0, 360, Scalar( 255, 0, 255 ), 4 );
//        Mat faceROI = frame_gray( faces[i] );
//        //-- In each face, detect eyes
//        std::vector<Rect> eyes;
//        eyes_cascade.detectMultiScale( faceROI, eyes );
//        for ( size_t j = 0; j < eyes.size(); j++ )
//        {
//            Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
//            int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
//            circle( frame, eye_center, radius, Scalar( 255, 0, 0 ), 4 );
//        }
//    }
//    //-- Show what you got
//    imshow( "Capture - Face detection", frame );
//}