// main.cpp

//#include "opencv2/imgproc.hpp"
//#include "opencv2/imgcodecs.hpp"
//#include "opencv2/highgui.hpp"
//#include <iostream>
//using namespace cv;
//using std::cout;
//int threshold_value = 0;
//int threshold_type = 3;
//int const max_value = 255;
//int const max_type = 4;
//int const max_binary_value = 255;
//Mat src, src_gray, dst;
//const char* window_name = "Threshold Demo";
//const char* trackbar_type = "Type: \n 0: Binary \n 1: Binary Inverted \n 2: Truncate \n 3: To Zero \n 4: To Zero Inverted";
//const char* trackbar_value = "Value";
//static void Threshold_Demo( int, void* )
//{
//    /* 0: Binary
//       1: Binary Inverted
//       2: Threshold Truncated
//       3: Threshold to Zero
//       4: Threshold to Zero Inverted
//     */
//    threshold( src_gray, dst, threshold_value, max_binary_value, threshold_type );
//    imshow( window_name, dst );
//}
//int main( int argc, char** argv )
//{
//    String imageName("C:\\Documents\\2.png"); // by default
//    if (argc > 1)
//    {
//        imageName = argv[1];
//    }
//    src = imread( samples::findFile( imageName ), IMREAD_COLOR ); // Load an image
//    if (src.empty())
//    {
//        cout << "Cannot read the image: " << imageName << std::endl;
//        return -1;
//    }
//    cvtColor( src, src_gray, COLOR_BGR2GRAY ); // Convert the image to Gray
//    namedWindow( window_name, WINDOW_AUTOSIZE ); // Create a window to display results
//    createTrackbar( trackbar_type,
//                    window_name, &threshold_type,
//                    max_type, Threshold_Demo ); // Create a Trackbar to choose type of Threshold
//    createTrackbar( trackbar_value,
//                    window_name, &threshold_value,
//                    max_value, Threshold_Demo ); // Create a Trackbar to choose Threshold value
//    Threshold_Demo( 0, 0 ); // Call the function to initialize
//    waitKey();
//    return 0;
//}
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;

int main()
{
    // Load the source image as 3-channel BGR.
    Mat src = imread("C:\\Documents\\1.png", IMREAD_COLOR);
    if (src.empty())
    {
        cout << "Cannot read the image" << endl;
        return -1;
    }

    // Build an alpha mask from the grayscale image. With THRESH_OTSU the
    // fixed threshold value (254) is ignored and Otsu's method picks it.
    Mat dst, tmp, alpha;
    cvtColor(src, tmp, COLOR_BGR2GRAY);
    threshold(tmp, alpha, 254, 255, THRESH_BINARY_INV | THRESH_OTSU);

    // Merge the three BGR channels plus the mask into a 4-channel BGRA image.
    Mat rgb[3];
    split(src, rgb);
    Mat rgba[4] = { rgb[0], rgb[1], rgb[2], alpha };
    merge(rgba, 4, dst);

    imwrite("C:\\Documents\\alpha.png", alpha); // the alpha mask on its own
    imwrite("C:\\Documents\\dst9.png", dst);    // image with transparent background
    return 0;
}
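
// Optional sanity check (a minimal sketch; it would sit just before the final return
// in main above): reload dst9.png with IMREAD_UNCHANGED and confirm 4 channels survived.
//Mat check = imread("C:\\Documents\\dst9.png", IMREAD_UNCHANGED);
//if (!check.empty())
//    cout << "dst9.png has " << check.channels() << " channels" << endl; // expect 4 (BGRA)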
//#include <opencv2/imgproc.hpp>
//#include <opencv2/highgui.hpp>
//#include <opencv2/opencv.hpp>
//#include <vector>
//#include <algorithm>
//#include <tuple>
//#include <iostream>
//using namespace cv;
//struct comparator{
//    bool operator() (std::tuple<std::vector<cv::Point>, bool, double> t1,
//                     std::tuple<std::vector<cv::Point>, bool, double> t2) {
//        return std::get<2>(t1) > std::get<2>(t2);
//    }
//} comparator;
//int main(int, char**)
//{
//    // get image
//    cv::Mat image = cv::imread("C:\\Documents\\2.png");
//    cv::Mat grayImg;
//    // convert to greyscale
//    cv::cvtColor(image, grayImg, COLOR_BGRA2GRAY);
////    cv::Mat canny;
////    cv::Canny(grayImg, canny, 120, 255, 3);
//    // finding threshes
//    cv::Mat thresh;
//    cv::threshold(grayImg, thresh, 100, 255, THRESH_BINARY_INV | THRESH_OTSU);
//    // finding contours
//    std::vector<std::vector<cv::Point>> contours;
//    std::vector<cv::Vec4i> hierarchy;
//    findContours( thresh, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, Point(0, 0) );
//    // finding max contour
//    std::vector<std::tuple<std::vector<cv::Point>, bool, double>> vec;
//    for(size_t i = 0; i < contours.size(); ++i){
//        vec.push_back(std::make_tuple(contours.at(i), cv::isContourConvex(contours.at(i)), cv::contourArea(contours.at(i))));
//    }
//    std::sort(vec.begin(), vec.end(), comparator);
//    std::tuple<std::vector<cv::Point>, bool, double> maxContour;
//    maxContour = vec.at(0);
//    // create mask
//    cv::Mat mask = Mat::zeros(thresh.size(), CV_8U);
////    for(size_t i = 0; i < contours.size(); ++i){
////        cv::fillConvexPoly(mask, std::get<0>(vec.at(i)), Scalar(255,0,0), 8, 0);
////    }
//    cv::fillConvexPoly(mask, std::get<0>(maxContour), Scalar(255,0,0), 8, 0);
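//    // Note: fillConvexPoly assumes the polygon is convex; for a general (possibly
//    // concave) contour, drawContours with FILLED fills the mask reliably.
//    // A minimal sketch of that alternative (maxOnly is a hypothetical local):
////    std::vector<std::vector<cv::Point>> maxOnly = { std::get<0>(maxContour) };
////    cv::drawContours(mask, maxOnly, 0, Scalar(255), cv::FILLED);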
//    // bitwise
//    cv::Mat res;
//    cv::bitwise_and(image, image, res, mask);
//    // show process
//    imshow("result", res);
//    imshow("mask", mask);
//    imshow("canny", thresh);
//    imshow("source", image);
//    // create transparent background
//    Mat dst;
//    Mat rgb[3];
//    split(image, rgb);
//    Mat rgba[4] = {rgb[0], rgb[1], rgb[2], thresh};
//    merge(rgba, 4, dst);
//    // save the transparent and cropped images to files
//    imwrite("C:/Documents/21.png", res);
//    imwrite("C:/Documents/dst.png", dst);
//    imwrite("C:/Documents/mask.png", mask);
//    imwrite("C:/Documents/thresh.png", thresh);
//    imwrite("C:/Documents/src.png", image);
//    while (true) {
//        if (waitKey() == 27) { // wait indefinitely for a key press; break when Esc is pressed
//            std::cout << "esc key is pressed by user";
//            break;
//        }
//    }
//    return 0;
//}
//#include "opencv2/objdetect.hpp"
//#include "opencv2/highgui.hpp"
//#include "opencv2/imgproc.hpp"
//#include "opencv2/videoio.hpp"
//#include <iostream>
//using namespace std;
//using namespace cv;
//void detectAndDisplay( Mat frame );
//CascadeClassifier face_cascade;
//CascadeClassifier eyes_cascade;
//int main( int argc, const char** argv )
//{
//    CommandLineParser parser(argc, argv,
//                             "{help h||}"
//                             "{face_cascade|data/haarcascades/haarcascade_frontalface_alt.xml|Path to face cascade.}"
//                             "{eyes_cascade|data/haarcascades/haarcascade_eye_tree_eyeglasses.xml|Path to eyes cascade.}"
//                             "{camera|0|Camera device number.}");
//    parser.about( "\nThis program demonstrates using the cv::CascadeClassifier class to detect objects (Face + eyes) in a video stream.\n"
//                  "You can use Haar or LBP features.\n\n" );
//    parser.printMessage();
//    String face_cascade_name = samples::findFile( parser.get<String>("face_cascade") );
//    String eyes_cascade_name = samples::findFile( parser.get<String>("eyes_cascade") );
//    //-- 1. Load the cascades
//    if( !face_cascade.load( face_cascade_name ) )
//    {
//        cout << "--(!)Error loading face cascade\n";
//        return -1;
//    };
//    if( !eyes_cascade.load( eyes_cascade_name ) )
//    {
//        cout << "--(!)Error loading eyes cascade\n";
//        return -1;
//    };
//    int camera_device = parser.get<int>("camera");
//    VideoCapture capture;
//    //-- 2. Read the video stream
//    capture.open( camera_device );
//    if ( ! capture.isOpened() )
//    {
//        cout << "--(!)Error opening video capture\n";
//        return -1;
//    }
//    Mat frame;
//    while ( capture.read(frame) )
//    {
//        if( frame.empty() )
//        {
//            cout << "--(!) No captured frame -- Break!\n";
//            break;
//        }
//        //-- 3. Apply the classifier to the frame
//        detectAndDisplay( frame );
//        if( waitKey(10) == 27 )
//        {
//            break; // escape
//        }
//    }
//    return 0;
//}
//void detectAndDisplay( Mat frame )
//{
//    Mat frame_gray;
//    cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
//    equalizeHist( frame_gray, frame_gray );
//    //-- Detect faces
//    std::vector<Rect> faces;
//    face_cascade.detectMultiScale( frame_gray, faces );
//    for ( size_t i = 0; i < faces.size(); i++ )
//    {
//        Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
//        ellipse( frame, center, Size( faces[i].width/2, faces[i].height/2 ), 0, 0, 360, Scalar( 255, 0, 255 ), 4 );
//        Mat faceROI = frame_gray( faces[i] );
//        //-- In each face, detect eyes
//        std::vector<Rect> eyes;
//        eyes_cascade.detectMultiScale( faceROI, eyes );
//        for ( size_t j = 0; j < eyes.size(); j++ )
//        {
//            Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
//            int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
//            circle( frame, eye_center, radius, Scalar( 255, 0, 0 ), 4 );
//        }
//    }
//    //-- Show what you got
//    imshow( "Capture - Face detection", frame );
//}