diff --git a/run.sh b/run.sh
index fe8f00f..7ec9af9 100755
--- a/run.sh
+++ b/run.sh
@@ -15,6 +15,8 @@
 DIR=/pub/data/chenzh/data/sat2022
 INSTANCE=03bb7baaa45980753a0e7050ae44755d-atco_enc3_opt1_03_53.cnf
 
-make -j 16 && mpirun --bind-to none -np 9 --allow-run-as-root ./light -i $DIR/$INSTANCE --share=1 --threads=16 --times=3600
+# make -j 16 && mpirun --bind-to none -np 9 --allow-run-as-root valgrind ./light -i $DIR/$INSTANCE --share=1 --threads=16 --times=3600
+
+make -j 16 && mpirun --bind-to none -np 9 --allow-run-as-root ./light -i ./data/hard1.cnf --share=1 --threads=16 --times=3600
 
 #./light -i $DIR/$INSTANCE --share=1 --threads=16 --times=3600
diff --git a/src/distributed/leader.hpp b/src/distributed/leader.hpp
index 555941a..995be6f 100644
--- a/src/distributed/leader.hpp
+++ b/src/distributed/leader.hpp
@@ -29,8 +29,6 @@ void leader_main(light* S, int num_procs, int rank) {
         MPI_Send(&start, 1, MPI_INT, i, START_TAG, MPI_COMM_WORLD);
     }
-
-    // preprocess 证明了UNSAT 则不需要启动云计算
     if(!start) {
         MPI_Barrier(MPI_COMM_WORLD);
@@ -54,7 +52,7 @@ void leader_main(light* S, int num_procs, int rank) {
     printf("c [leader] hand out length of cnf instance to all nodes\n");
 
-    int cnf_length = str_ref.size();
+    int cnf_length = str_ref.size() + 1;
 
     MPI_Barrier(MPI_COMM_WORLD);
diff --git a/src/workers/clause.hpp b/src/workers/clause.hpp
index 74b1e9a..8111356 100644
--- a/src/workers/clause.hpp
+++ b/src/workers/clause.hpp
@@ -30,11 +30,11 @@ struct clause_store {
             res = (res + d * b) % MOD;
             b = (b * B) % MOD;
         }
+
+        return res;
     }
 
     int hash_code() {
-        const int B = 31;
-        const int MOD = 10000007;
         return __hash(31, 1000000007) ^ __hash(17, 1000000009);
     }
diff --git a/src/workers/sharer.cpp b/src/workers/sharer.cpp
index b31b284..348eb5f 100644
--- a/src/workers/sharer.cpp
+++ b/src/workers/sharer.cpp
@@ -37,7 +37,7 @@ void share_clauses_to_next_node(int from, const std::vector<shared_ptr<clause_store>> &cls) {
-        if(clause_imported[cls[i]->hash_code()]) continue;
+        //if(clause_imported[cls[i]->hash_code()]) continue;
         send_length += (cls[i]->size + 2);
     }
@@ -48,7 +48,7 @@ void share_clauses_to_next_node(int from, const std::vector<shared_ptr<clause_store>> &cls) {
-        if(clause_imported[cls[i]->hash_code()]) continue;
+        // if(clause_imported[cls[i]->hash_code()]) continue;
         auto& c = cls[i];
         send_buf[index++] = c->size;
         send_buf[index++] = c->lbd;
@@ -97,18 +97,23 @@ bool receive_clauses_from_last_node(std::vector<shared_ptr<clause_store>> &claus
     MPI_Get_count(&status, MPI_INT, &count);
 
     from = buf[index++];
+
     while(index < count) {
         num_received_clauses_by_network++;
         shared_ptr<clause_store> cl = std::make_shared<clause_store>(buf[index++]);
+
         cl->lbd = buf[index++];
         for(int i=0; i<cl->size; i++) {
             cl->data[i] = buf[index++];
         }
+
+        if(clause_imported[cl->hash_code()]) {
+            num_skip_clauses_by_network++;
+            continue;
+        }
+
         clauses.push_back(cl);
     }
@@ -148,40 +153,47 @@ void sharer::do_clause_sharing() {
     printf("c [worker] round %d, time: %d.%d\n", nums, solve_time / 1000, solve_time % 1000);
 
+    // 导入外部网络传输的子句
+    std::vector<shared_ptr<clause_store>> clauses;
+    int from = receive_clauses_from_last_node(clauses);
+    if(from != -1 && clauses.size() > 0) {
+        printf("c [worker] get %d exported clauses from network\n", clauses.size());
+
+        for (int j = 0; j < consumers.size(); j++) {
+            consumers[j]->import_clauses_from(clauses);
+        }
+
+        for (int k = 0; k < clauses.size(); k++) {
+            clause_imported[clauses[k]->hash_code()] = true;
+        }
+
+        // 传递外部网络传输的子句给下个节点
+        share_clauses_to_next_node(from, clauses);
+    }
+
     // printf("start sharing %d\n", sq->share_intv);
     for (int i = 0; i < producers.size(); i++) {
         cls.clear();
         producers[i]->export_clauses_to(cls);
-        //printf("c size %d\n", sq->cls.size());
-        int number = cls.size();
-
-        // printf("c [worker] thread-%d: get %d exported clauses\n", i, number);
+        // 删除掉重复的学习子句
+        int t_size = cls.size();
+        for(int i=0; i<t_size; i++) {
+            if(clause_imported[cls[i]->hash_code()]) {
+                std::swap(cls[i], cls[t_size-1]);
+                t_size--;
+            }
+        }
+        cls.resize(t_size);
 
         //分享当前节点产生的子句
         if(cls.size() > 0) share_clauses_to_next_node(rank, cls);
 
-        // 导入外部网络传输的子句
-        std::vector<shared_ptr<clause_store>> clauses;
-        int from = receive_clauses_from_last_node(clauses);
-        if(from != -1) {
-            for (int j = 0; j < consumers.size(); j++) {
-                for (int k = 0; k < clauses.size(); k++)
-                    clauses[k]->increase_refs(1);
-                consumers[j]->import_clauses_from(clauses);
-            }
-
-            // 传递外部网络传输的子句给下个节点
-            share_clauses_to_next_node(from, clauses);
-
-            for (int k = 0; k < clauses.size(); k++) {
-                clause_imported[clauses[k]->hash_code()] = true;
-                clauses[k]->free_clause();
-            }
-        }
+        //printf("c [worker] thread-%d: get %d exported clauses\n", i, t_size);
 
-        //导入当前节点产生的子句
-        // int percent = sort_clauses(i);
+        // 增加 lits 限制
+        int percent = sort_clauses(i);
+
         // if (percent < 75) {
         //     producers[i]->broaden_export_limit();
         // }
@@ -191,13 +203,10 @@ void sharer::do_clause_sharing() {
         for (int j = 0; j < consumers.size(); j++) {
             if (producers[i]->id == consumers[j]->id) continue;
-            for (int k = 0; k < cls.size(); k++)
-                cls[k]->increase_refs(1);
             consumers[j]->import_clauses_from(cls);
         }
 
         for (int k = 0; k < cls.size(); k++) {
             clause_imported[cls[k]->hash_code()] = true;
-            cls[k]->free_clause();
         }
     }