Fix a bug where the file read in was not '\0'-terminated; add a lits limit
commit 17a9b0062e
parent bb23becb96

run.sh
@@ -15,6 +15,8 @@
 DIR=/pub/data/chenzh/data/sat2022
 INSTANCE=03bb7baaa45980753a0e7050ae44755d-atco_enc3_opt1_03_53.cnf
 
-make -j 16 && mpirun --bind-to none -np 9 --allow-run-as-root ./light -i $DIR/$INSTANCE --share=1 --threads=16 --times=3600
+# make -j 16 && mpirun --bind-to none -np 9 --allow-run-as-root valgrind ./light -i $DIR/$INSTANCE --share=1 --threads=16 --times=3600
+
+make -j 16 && mpirun --bind-to none -np 9 --allow-run-as-root ./light -i ./data/hard1.cnf --share=1 --threads=16 --times=3600
 
 #./light -i $DIR/$INSTANCE --share=1 --threads=16 --times=3600
@@ -29,8 +29,6 @@ void leader_main(light* S, int num_procs, int rank) {
 MPI_Send(&start, 1, MPI_INT, i, START_TAG, MPI_COMM_WORLD);
 }
 
-
-
 // if preprocessing already proved UNSAT, there is no need to start cloud solving
 if(!start) {
 MPI_Barrier(MPI_COMM_WORLD);
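The lines above are the leader's side of the start-up handshake: it sends a start flag to every worker and, if preprocessing already proved UNSAT, everyone simply meets the barrier and shuts down. A minimal sketch of what the worker-side counterpart could look like (the worker code is not part of this diff; worker_main and the literal tag value are assumptions):

    // Hypothetical worker-side counterpart to the leader's MPI_Send/MPI_Barrier above.
    #include <mpi.h>

    void worker_main() {
        const int START_TAG = 1;   // assumed tag value; the real constant is defined elsewhere
        int start = 0;
        // receive the go/no-go flag the leader sends with START_TAG
        MPI_Recv(&start, 1, MPI_INT, 0, START_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        if (!start) {
            // preprocessing already proved UNSAT: meet the barrier and return
            MPI_Barrier(MPI_COMM_WORLD);
            return;
        }
        // ...otherwise take part in the CNF hand-out below and start solving...
    }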
@@ -54,7 +52,7 @@ void leader_main(light* S, int num_procs, int rank) {
 
 printf("c [leader] hand out length of cnf instance to all nodes\n");
 
-int cnf_length = str_ref.size();
+int cnf_length = str_ref.size() + 1;
 
 MPI_Barrier(MPI_COMM_WORLD);
 
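This is the fix named in the commit title: the CNF text is handed to the other ranks as a raw character buffer and later parsed as a C string, so the announced length must cover the terminating '\0'. Sending only str_ref.size() bytes left the receiving buffer unterminated, and the parser could read past its end. A minimal sketch of the pattern, assuming the hand-out is a broadcast (the real transfer and variable names may differ):

    // Sketch only: broadcast a '\0'-terminated CNF buffer from rank 0 (assumed shape).
    #include <mpi.h>
    #include <algorithm>
    #include <string>
    #include <vector>

    void broadcast_cnf(const std::string& str_ref, int rank) {
        // announce size() + 1 so the terminating '\0' travels with the text
        int cnf_length = (int)str_ref.size() + 1;
        MPI_Bcast(&cnf_length, 1, MPI_INT, 0, MPI_COMM_WORLD);

        std::vector<char> buf(cnf_length);
        if (rank == 0)
            std::copy(str_ref.c_str(), str_ref.c_str() + cnf_length, buf.begin());
        MPI_Bcast(buf.data(), cnf_length, MPI_CHAR, 0, MPI_COMM_WORLD);
        // buf.data() is now a valid C string on every rank and can be parsed safely
    }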
@@ -30,11 +30,11 @@ struct clause_store {
 res = (res + d * b) % MOD;
 b = (b * B) % MOD;
 }
+
+return res;
 }
 
 int hash_code() {
-const int B = 31;
-const int MOD = 10000007;
 return __hash(31, 1000000007) ^ __hash(17, 1000000009);
 }
 
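clause_store identifies clauses for deduplication with a polynomial rolling hash: __hash accumulates the literals under one (base, modulus) pair, and hash_code() XORs two such hashes so that a collision has to happen under both pairs at once; the result is used to index the clause_imported table in the sharer. A self-contained sketch of the scheme (the exact literal-to-integer encoding is an assumption; the diff only shows the accumulation loop and the two pairs):

    // Standalone sketch of the double polynomial hash used for clause deduplication.
    #include <cstdlib>
    #include <vector>

    struct clause_sketch {
        std::vector<int> data;   // literals

        int __hash(long long B, long long MOD) const {
            long long res = 0, b = 1;
            for (int lit : data) {
                long long d = std::abs(lit) * 2LL + (lit < 0);  // fold sign into a positive code (assumed)
                res = (res + d * b) % MOD;
                b = (b * B) % MOD;
            }
            return (int)res;
        }

        // XOR of two independent hashes: a clause only collides if it collides under both
        int hash_code() const {
            return __hash(31, 1000000007) ^ __hash(17, 1000000009);
        }
    };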
@@ -37,7 +37,7 @@ void share_clauses_to_next_node(int from, const std::vector<shared_ptr<clause_st
 
 // initialize the data to be sent
 for(int i=0; i<cls.size(); i++) {
-if(clause_imported[cls[i]->hash_code()]) continue;
+//if(clause_imported[cls[i]->hash_code()]) continue;
 send_length += (cls[i]->size + 2);
 }
 
@@ -48,7 +48,7 @@ void share_clauses_to_next_node(int from, const std::vector<shared_ptr<clause_st
 send_buf[index++] = from;
 
 for(int i=0; i<cls.size(); i++) {
-if(clause_imported[cls[i]->hash_code()]) continue;
+// if(clause_imported[cls[i]->hash_code()]) continue;
 auto& c = cls[i];
 send_buf[index++] = c->size;
 send_buf[index++] = c->lbd;
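share_clauses_to_next_node flattens the clauses into a single int buffer: the originating rank first, then for every clause its size, its LBD, and its literals, which is why each clause contributes size + 2 ints to send_length. receive_clauses_from_last_node in the next hunk walks the same layout back. A sketch of that pack/unpack pair under those assumptions (the struct and helper names are illustrative, not the solver's API):

    // Sketch of the assumed wire layout: [from, (size, lbd, lit_0 .. lit_{size-1})*].
    #include <vector>

    struct clause_msg { int lbd; std::vector<int> lits; };

    std::vector<int> pack(int from, const std::vector<clause_msg>& cls) {
        std::vector<int> buf;
        buf.push_back(from);                    // originating rank
        for (const auto& c : cls) {
            buf.push_back((int)c.lits.size());  // clause size
            buf.push_back(c.lbd);               // LBD score
            buf.insert(buf.end(), c.lits.begin(), c.lits.end());
        }
        return buf;
    }

    std::vector<clause_msg> unpack(const std::vector<int>& buf, int& from) {
        std::vector<clause_msg> cls;
        size_t index = 0;
        from = buf[index++];
        while (index < buf.size()) {
            clause_msg c;
            int size = buf[index++];
            c.lbd = buf[index++];
            c.lits.assign(buf.begin() + index, buf.begin() + index + size);
            index += size;
            cls.push_back(c);
        }
        return cls;
    }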
@@ -97,18 +97,23 @@ bool receive_clauses_from_last_node(std::vector<shared_ptr<clause_store>> &claus
 MPI_Get_count(&status, MPI_INT, &count);
 
 from = buf[index++];
 
 while(index < count) {
 num_received_clauses_by_network++;
 
 shared_ptr<clause_store> cl = std::make_shared<clause_store>(buf[index++]);
 
 cl->lbd = buf[index++];
 for(int i=0; i<cl->size; i++) {
 cl->data[i] = buf[index++];
 }
 
 if(clause_imported[cl->hash_code()]) {
 
 num_skip_clauses_by_network++;
 continue;
 }
 
 clauses.push_back(cl);
 }
 
@@ -148,40 +153,47 @@ void sharer::do_clause_sharing() {
 
 printf("c [worker] round %d, time: %d.%d\n", nums, solve_time / 1000, solve_time % 1000);
 
+// import clauses received over the network
+std::vector<shared_ptr<clause_store>> clauses;
+int from = receive_clauses_from_last_node(clauses);
+if(from != -1 && clauses.size() > 0) {
+printf("c [worker] get %d exported clauses from network\n", clauses.size());
+
+for (int j = 0; j < consumers.size(); j++) {
+consumers[j]->import_clauses_from(clauses);
+}
+
+for (int k = 0; k < clauses.size(); k++) {
+clause_imported[clauses[k]->hash_code()] = true;
+}
+
+// forward the clauses received over the network to the next node
+share_clauses_to_next_node(from, clauses);
+}
+
 // printf("start sharing %d\n", sq->share_intv);
 for (int i = 0; i < producers.size(); i++) {
 cls.clear();
 producers[i]->export_clauses_to(cls);
 
-//printf("c size %d\n", sq->cls.size());
-int number = cls.size();
-// printf("c [worker] thread-%d: get %d exported clauses\n", i, number);
+// remove duplicate learnt clauses
+int t_size = cls.size();
+for(int i=0; i<t_size; i++) {
+if(clause_imported[cls[i]->hash_code()]) {
+std::swap(cls[i], cls[t_size-1]);
+t_size--;
+}
+}
+cls.resize(t_size);
 
 // share the clauses produced by this node
 if(cls.size() > 0) share_clauses_to_next_node(rank, cls);
 
-// import clauses received over the network
-std::vector<shared_ptr<clause_store>> clauses;
-int from = receive_clauses_from_last_node(clauses);
-if(from != -1) {
-for (int j = 0; j < consumers.size(); j++) {
-for (int k = 0; k < clauses.size(); k++)
-clauses[k]->increase_refs(1);
-consumers[j]->import_clauses_from(clauses);
-}
-
-// forward the clauses received over the network to the next node
-share_clauses_to_next_node(from, clauses);
-
-for (int k = 0; k < clauses.size(); k++) {
-clause_imported[clauses[k]->hash_code()] = true;
-clauses[k]->free_clause();
-}
-}
+//printf("c [worker] thread-%d: get %d exported clauses\n", i, t_size);
 
-// import the clauses produced by this node
-// int percent = sort_clauses(i);
+// add lits limit
+int percent = sort_clauses(i);
 
 // if (percent < 75) {
 // producers[i]->broaden_export_limit();
 // }
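The duplicate filter added above compacts cls in place: a clause whose hash_code() is already flagged in clause_imported is swapped behind the logical end t_size, and cls.resize(t_size) finally drops the tail; the order of the surviving clauses is not preserved, which does not matter for sharing. A small self-contained sketch of the same compaction on plain hashes (it only advances i when the current slot is kept, so the element swapped in from the back is re-examined as well):

    // Sketch of the swap-to-back compaction used above (illustrative types only).
    #include <algorithm>
    #include <unordered_set>
    #include <vector>

    // keep only clauses whose hash is not yet in 'imported'
    void drop_imported(std::vector<int>& clause_hashes,
                       const std::unordered_set<int>& imported) {
        int t_size = (int)clause_hashes.size();
        int i = 0;
        while (i < t_size) {
            if (imported.count(clause_hashes[i])) {
                std::swap(clause_hashes[i], clause_hashes[t_size - 1]);
                t_size--;              // shrink the logical size; re-check slot i next pass
            } else {
                i++;                   // current element is kept, move on
            }
        }
        clause_hashes.resize(t_size);  // physically drop the tail of duplicates
    }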
@@ -191,13 +203,10 @@ void sharer::do_clause_sharing() {
 
 for (int j = 0; j < consumers.size(); j++) {
 if (producers[i]->id == consumers[j]->id) continue;
-for (int k = 0; k < cls.size(); k++)
-cls[k]->increase_refs(1);
 consumers[j]->import_clauses_from(cls);
 }
 for (int k = 0; k < cls.size(); k++) {
 clause_imported[cls[k]->hash_code()] = true;
-cls[k]->free_clause();
 }
 }
 
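One plausible reading of the deletions above: with shared clauses held through shared_ptr<clause_store>, as the other hunks show, the manual increase_refs()/free_clause() bookkeeping around import_clauses_from becomes redundant, since every consumer's copy of the pointer keeps the clause alive and it is released automatically when the last holder lets go. A tiny sketch of that ownership pattern (clause_store_sketch is a stand-in type, not the solver's):

    // Sketch: shared_ptr ownership makes manual ref-count calls unnecessary.
    #include <memory>
    #include <vector>

    struct clause_store_sketch { std::vector<int> data; int lbd = 0; };

    void import_into_consumers(const std::vector<std::shared_ptr<clause_store_sketch>>& cls,
                               std::vector<std::vector<std::shared_ptr<clause_store_sketch>>>& consumers) {
        for (auto& consumer : consumers) {
            // copying the shared_ptr bumps its use_count; no increase_refs() needed
            consumer.insert(consumer.end(), cls.begin(), cls.end());
        }
        // no free_clause() either: each clause is destroyed with its last holder
    }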