Julien Dehos, 7 years ago
commit a38d3abaa3
9 files changed with 8394928 additions and 17 deletions
  1. Makefile (+4, -3)
  2. README.md (+20, -2)
  3. canyon.pgm (+8394757, -0)
  4. image.hpp (+26, -0)
  5. laplacien_mpi_1.cpp (+19, -5)
  6. laplacien_mpi_2.cpp (+22, -6)
  7. laplacien_omp.cpp (+47, -0)
  8. plot_local.sh (+32, -0)
  9. run.sh (+1, -1)

+ 4 - 3
Makefile

@@ -1,6 +1,7 @@
 all:
-	mpic++ -std=c++11 -Wall -Wextra -o laplacien_mpi_1.out laplacien_mpi_1.cpp 
-	mpic++ -std=c++11 -Wall -Wextra -o laplacien_mpi_2.out laplacien_mpi_2.cpp 
+	mpic++ -O2 -std=c++11 -Wall -Wextra -Wno-unknown-pragmas -o laplacien_mpi_1.out laplacien_mpi_1.cpp 
+	mpic++ -O2 -std=c++11 -Wall -Wextra -Wno-unknown-pragmas -o laplacien_mpi_2.out laplacien_mpi_2.cpp 
+	g++ -O2 -std=c++11 -Wall -Wextra -fopenmp -o laplacien_omp.out laplacien_omp.cpp 
 
 clean:
-	rm -f laplacien_mpi_?.out output_?.pgm OAR.*
+	rm -f laplacien_*.out 
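
A likely reason for the new -Wno-unknown-pragmas flag: image.hpp, which both MPI programs include, now carries an OpenMP pragma, and without -fopenmp a -Wall build would warn about it. A minimal sketch of the effect (hypothetical file name, warning text approximate):

```
// pragma_demo.cpp -- hypothetical example, not part of this commit
// g++ -std=c++11 -Wall -Wextra pragma_demo.cpp
//   -> warns about the unknown "#pragma omp" (the pragma is then ignored)
// g++ -std=c++11 -Wall -Wextra -Wno-unknown-pragmas pragma_demo.cpp
//   -> same sequential binary, but the warning is silenced
int main()
{
    int sum = 0;
#pragma omp parallel for reduction(+:sum)   // ignored when built without -fopenmp
    for (int i = 0; i < 100; ++i)
        sum += i;
    return (sum == 4950) ? 0 : 1;           // 0 + 1 + ... + 99
}
```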

+ 20 - 2
README.md

@@ -3,16 +3,34 @@
 ## description
  
 - distributed algorithm that splits an image, sends the pieces to the slave processes (Laplacian computation) and merges the results
-- C++ code with the boost::mpi library
+- C++ code with the MPI (C) library
 - run with the OpenMPI environment
 
-## compilation and execution
+## compilation
 
 ```
 make
+```
+
+## execution on an OAR grid
+
+```
 oarsub -S ./run.sh
 ```
 
+## local execution
+
+```
+# MPI version with master and slave code mixed together
+mpirun -n 4 ./laplacien_mpi_1.out canyon.pgm canyon_mpi_1.pgm 100 10
+
+# MPI version with master and slave code separated
+mpirun -n 4 ./laplacien_mpi_2.out canyon.pgm canyon_mpi_2.pgm 100 10
+
+# OpenMP version
+OMP_NUM_THREADS=4 ./laplacien_omp.out canyon.pgm canyon_omp.pgm 100 10
+```
+
 ## references
 
 - [http://oar.imag.fr/docs/latest/user/usecases.html](http://oar.imag.fr/docs/latest/user/usecases.html)
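
For readers less familiar with MPI, the split/compute/merge pattern described in this README maps onto an MPI_Scatter of horizontal bands, a local computation, and an MPI_Gather of the partial results. The following stripped-down sketch (a toy image, no file I/O, and none of the heightN/size2 bookkeeping of the real sources) shows only that skeleton:

```
// scatter_gather_sketch.cpp -- simplified illustration, not the repository's code
// mpic++ -std=c++11 -Wall -Wextra -o sketch.out scatter_gather_sketch.cpp
// mpirun -n 4 ./sketch.out
#include <mpi.h>
#include <iostream>
#include <vector>

int main(int argc, char ** argv)
{
    MPI_Init(&argc, &argv);
    int rank, nbProcs;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nbProcs);

    // toy image, height chosen to be divisible by the number of processes
    const int width = 8;
    const int height = 4 * nbProcs;
    std::vector<unsigned char> image;
    if (rank == 0)
        image.assign(width * height, 128);   // the master "reads" the image

    // send one horizontal band to each process
    const int bandSize = width * (height / nbProcs);
    std::vector<unsigned char> band(bandSize);
    MPI_Scatter(image.data(), bandSize, MPI_UNSIGNED_CHAR,
                band.data(), bandSize, MPI_UNSIGNED_CHAR, 0, MPI_COMM_WORLD);

    // local computation (the real code calls computeLaplacian here)
    for (auto & p : band)
        p = 255 - p;

    // collect the partial results on the master
    std::vector<unsigned char> result(rank == 0 ? width * height : 0);
    MPI_Gather(band.data(), bandSize, MPI_UNSIGNED_CHAR,
               result.data(), bandSize, MPI_UNSIGNED_CHAR, 0, MPI_COMM_WORLD);

    if (rank == 0)
        std::cout << "gathered " << result.size() << " pixels" << std::endl;

    MPI_Finalize();
    return 0;
}
```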

Diff not shown because the file is too large
+ 8394757 - 0
canyon.pgm


+ 26 - 0
image.hpp

@@ -128,5 +128,31 @@ image_t computeLaplacian(const image_t & data1, int width, int height,
     return data2;
 }
 
+image_t computeLaplacianOmp(const image_t & data1, int width, int height, 
+        double scaling)
+{
+    assert(data1.size() >= unsigned(width*height));
+    // return the pixel value of (x,y) in [0,1]
+    auto ind = [&data1,width](int x, int y) 
+    {return double(data1[y*width + x])/255.0;};
+    // compute laplacian image
+    image_t data2(width*height);
+#pragma omp parallel for
+    for (int x=1; x<width-1; x++)
+    {
+        for (int y=1; y<height-1; y++)
+        {
+            // compute laplacian value in [-4,4]
+            double v = -4.0*ind(x,y) 
+                + ind(x,y-1) + ind(x,y+1) + ind(x-1,y) + ind(x+1,y);
+            // scale the value in [0,255]
+            // and write this value in output image
+            int i255 = 255.0 * (scaling*v+4.0)/8.0;
+            data2[y*width + x] = std::min(255, std::max(0, i255));
+        }
+    }
+    return data2;
+}
+
 #endif
 

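A side note on the loop nest above: data2 is indexed as y*width + x (row-major), so the committed pragma parallelizes over columns while each thread strides through memory. A hedged alternative, the same computation with the parallel loop on the rows, is sketched below (hypothetical function name, not part of the commit); whether it is actually faster here would have to be measured, e.g. with plot_local.sh:

```
// hypothetical row-parallel variant of computeLaplacianOmp (sketch only)
image_t computeLaplacianOmpRows(const image_t & data1, int width, int height,
        double scaling)
{
    assert(data1.size() >= unsigned(width*height));
    // pixel value of (x,y), scaled to [0,1]
    auto ind = [&data1,width](int x, int y)
    {return double(data1[y*width + x])/255.0;};
    image_t data2(width*height);
#pragma omp parallel for
    for (int y=1; y<height-1; y++)          // rows: writes to data2 are contiguous
    {
        for (int x=1; x<width-1; x++)
        {
            double v = -4.0*ind(x,y)
                + ind(x,y-1) + ind(x,y+1) + ind(x-1,y) + ind(x+1,y);
            int i255 = 255.0 * (scaling*v+4.0)/8.0;
            data2[y*width + x] = std::min(255, std::max(0, i255));
        }
    }
    return data2;
}
```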
+ 19 - 5
laplacien_mpi_1.cpp

@@ -1,5 +1,6 @@
 // mpic++ -std=c++11 -Wall -Wextra -o laplacien_mpi_1.out laplacien_mpi_1.cpp 
-// mpirun -n 4 ./laplacien_mpi_1.out
+// mpirun -n 4 ./laplacien_mpi_1.out canyon.pgm canyon_mpi_1.pgm
+// mix master code and slave code 
 
 #include "image.hpp"
 #include <algorithm>
@@ -15,12 +16,23 @@ int main(int argc, char ** argv)
     MPI_Comm_rank(MPI_COMM_WORLD, &worldRank);
     double t0 = MPI_Wtime();
 
+    if (argc != 5)
+    {
+        std::cout << "usage: " << argv[0] 
+            << " <input> <output> <scaling> <nb fakes>\n";
+        exit(-1);
+    }
+    const char * INPUT = argv[1];
+    const char * OUTPUT = argv[2];
+    const float SCALING = atof(argv[3]);
+    const int NB_FAKES = atoi(argv[4]);
+
     // read image (master node)
     image_t data0;
     int width, height;
     if (worldRank == 0)  
     {
-        std::string readError = readPgm("backloop.pgm", width, height, data0);
+        std::string readError = readPgm(INPUT, width, height, data0);
         if (readError != "")
         {
             std::cout << readError << std::endl;
@@ -51,7 +63,9 @@ int main(int argc, char ** argv)
             0, MPI_COMM_WORLD);
 
     // compute on each node
-    image_t nodeResult = computeLaplacian(nodeData, width, heightN, 10.0);
+    image_t nodeResult = computeLaplacian(nodeData, width, heightN, SCALING);
+    for (int k=0; k<NB_FAKES; ++k)
+        nodeResult = computeLaplacian(nodeData, width, heightN, SCALING);
 
     // receive results from nodes
     image_t data2(size2);
@@ -62,9 +76,9 @@ int main(int argc, char ** argv)
     // write output image (master node)
     if (worldRank == 0)
     {
-        writePgm("output_1.pgm", width, height, data2);
+        writePgm(OUTPUT, width, height, data2);
         double t1 = MPI_Wtime();
-        std::cout << "walltime = " << t1 - t0 << std::endl;
+        std::cout << t1 - t0;
     }
 
     MPI_Finalize();

+ 22 - 6
laplacien_mpi_2.cpp

@@ -1,5 +1,6 @@
 // mpic++ -std=c++11 -Wall -Wextra -o laplacien_mpi_2.out laplacien_mpi_2.cpp 
-// mpirun -n 4 ./laplacien_mpi_2.out
+// mpirun -n 4 ./laplacien_mpi_2.out canyon.pgm canyon_mpi_2.pgm
+// separate master code and slave code 
 
 #include "image.hpp"
 #include <algorithm>
@@ -14,6 +15,17 @@ int main(int argc, char ** argv)
     int worldRank;
     MPI_Comm_rank(MPI_COMM_WORLD, &worldRank);
 
+    if (argc != 5)
+    {
+        std::cout << "usage: " << argv[0] 
+            << " <input> <output> <scaling> <nb fakes>\n";
+        exit(-1);
+    }
+    const char * INPUT = argv[1];
+    const char * OUTPUT = argv[2];
+    const float SCALING = atof(argv[3]);
+    const int NB_FAKES = atoi(argv[4]);
+
     if (worldRank == 0)  // master node
     {
         double t0 = MPI_Wtime();
@@ -21,7 +33,7 @@ int main(int argc, char ** argv)
         // read image
         image_t data0;
         int width, height;
-        std::string readError = readPgm("backloop.pgm", width, height, data0);
+        std::string readError = readPgm(INPUT, width, height, data0);
         if (readError != "")
         {
             std::cout << readError << std::endl;
@@ -48,7 +60,9 @@ int main(int argc, char ** argv)
                 0, MPI_COMM_WORLD);
 
         // compute master data
-        image_t nodeResult = computeLaplacian(nodeData, width, heightN, 10.0);
+        image_t nodeResult = computeLaplacian(nodeData, width, heightN, SCALING);
+        for (int k=0; k<NB_FAKES; ++k)
+            nodeResult = computeLaplacian(nodeData, width, heightN, SCALING);
 
         // receive results from slave nodes
         image_t data2(size2, 0);
@@ -57,9 +71,9 @@ int main(int argc, char ** argv)
                 0, MPI_COMM_WORLD);
 
         // write output image
-        writePgm("output_2.pgm", width, height, data2);
+        writePgm(OUTPUT, width, height, data2);
         double t1 = MPI_Wtime();
-        std::cout << "walltime = " << t1 - t0 << std::endl;
+        std::cout << t1 - t0;
     }
     else  // slave nodes
     {
@@ -77,7 +91,9 @@ int main(int argc, char ** argv)
                 0, MPI_COMM_WORLD);
 
         // compute node data
-        image_t nodeResult = computeLaplacian(nodeData, width, heightN, 10.0);
+        image_t nodeResult = computeLaplacian(nodeData, width, heightN, SCALING);
+        for (int k=0; k<NB_FAKES; ++k)
+            nodeResult = computeLaplacian(nodeData, width, heightN, SCALING);
 
         // send results to master node
         MPI_Gather(nodeResult.data(), sizeN, MPI_UNSIGNED_CHAR, 

+ 47 - 0
laplacien_omp.cpp

@@ -0,0 +1,47 @@
+// g++ -std=c++11 -Wall -Wextra -fopenmp -o laplacien_omp.out laplacien_omp.cpp 
+// OMP_NUM_THREADS=4 ./laplacien_omp.out canyon.pgm canyon_omp.pgm
+// shared-memory OpenMP version (no MPI)
+
+#include "image.hpp"
+#include <algorithm>
+#include <omp.h>
+
+int main(int argc, char ** argv)
+{
+    if (argc != 5)
+    {
+        std::cout << "usage: " << argv[0] 
+            << " <input> <output> <scaling> <nb fakes>\n";
+        exit(-1);
+    }
+    const char * INPUT = argv[1];
+    const char * OUTPUT = argv[2];
+    const float SCALING = atof(argv[3]);
+    const int NB_FAKES = atoi(argv[4]);
+
+    double t0 = omp_get_wtime();
+
+    // read image 
+    image_t data0;
+    int width, height;
+    std::string readError = readPgm(INPUT, width, height, data0);
+    if (readError != "")
+    {
+        std::cout << readError << std::endl;
+        exit(-1);
+    }
+
+    // compute whole image
+    image_t data2 = computeLaplacianOmp(data0, width, height, SCALING);
+    for (int k=0; k<NB_FAKES; ++k)
+        data2 = computeLaplacianOmp(data0, width, height, SCALING);
+
+    // write output image
+    writePgm(OUTPUT, width, height, data2);
+
+    double t1 = omp_get_wtime();
+    std::cout << t1 - t0;
+
+    return 0;
+}
+

+ 32 - 0
plot_local.sh

@@ -0,0 +1,32 @@
+#!/bin/sh
+
+NB_THREADS_MAX=12
+SCALING=100
+NB_FAKES=5
+
+CSV="times.csv"
+SVG="times.svg"
+
+rm -f ${CSV}
+TMP=`mktemp`
+for n in `seq ${NB_THREADS_MAX}` ; do
+    echo "${n} thread(s)"
+    printf "%s; " "${n}" >> ${CSV}
+    mpirun -n ${n} ./laplacien_mpi_1.out canyon.pgm ${TMP} ${SCALING} ${NB_FAKES} | awk -v ORS="; " '{print $1}' >> ${CSV}
+    mpirun -n ${n} ./laplacien_mpi_2.out canyon.pgm ${TMP} ${SCALING} ${NB_FAKES} | awk -v ORS="; " '{print $1}' >> ${CSV}
+    OMP_NUM_THREADS=${n} ./laplacien_omp.out canyon.pgm ${TMP} ${SCALING} ${NB_FAKES} | awk '{print $1}' >> ${CSV}
+done
+rm ${TMP}
+echo "output: ${CSV}"
+
+gnuplot -e "set out '${SVG}'; \
+    set terminal svg size 1280,720; \
+    set style data linespoints; \
+    set grid xtics ytics; \
+    plot '${CSV}' using 1:2 title 'mpi1', \
+    '${CSV}' using 1:3 title 'mpi2', \
+    '${CSV}' using 1:4 title 'omp';
+"
+echo "output: ${SVG}"
+
+
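
For reference, the loop above leaves times.csv with one line per thread count and one wall time per column; only the layout is implied by the script, the values below are placeholders rather than measured results:

```
1; <t_mpi1>; <t_mpi2>; <t_omp>
2; <t_mpi1>; <t_mpi2>; <t_omp>
...
12; <t_mpi1>; <t_mpi2>; <t_omp>
```

This is what the gnuplot call then reads as columns 1:2, 1:3 and 1:4 to draw one curve per implementation.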

+ 1 - 1
run.sh

@@ -6,6 +6,6 @@
 ulimit -s unlimited
 NSLOTS=$(cat $OAR_NODEFILE | wc -l)
 PREF=$(dirname `which mpirun` | awk -F'/[^/]*$' '{print $1}')
-mpirun --prefix $PREF -np $NSLOTS -machinefile $OAR_NODEFILE -mca orte_rsh_agent "oarsh" ./laplacien_mpi_1.out
+mpirun --prefix $PREF -np $NSLOTS -machinefile $OAR_NODEFILE -mca orte_rsh_agent "oarsh" ./laplacien_mpi_1.out canyon.pgm canyon_mpi_1.pgm 100 10
 exit $?