diff --git a/src/trs/lexicon.rs b/src/trs/lexicon.rs
index eda9975..cbe0db9 100644
--- a/src/trs/lexicon.rs
+++ b/src/trs/lexicon.rs
@@ -488,8 +488,8 @@ impl Lex {
         for arg_tp in arg_types {
             let subtype = arg_tp.apply(ctx);
             let arg_schema = TypeSchema::Monotype(arg_tp);
-            let result = self
-                .sample_term_internal(
+            let result =
+                self.sample_term_internal(
                     &arg_schema,
                     ctx,
                     atom_weights,
@@ -498,13 +498,12 @@ impl Lex {
                     max_size,
                     size,
                     vars,
-                )
-                .map_err(|_| SampleError::Subterm)
-                .and_then(|subterm| {
-                    let tp = self.infer_term(&subterm, ctx)?.instantiate_owned(ctx);
-                    ctx.unify_fast(subtype, tp)?;
-                    Ok(subterm)
-                });
+                ).map_err(|_| SampleError::Subterm)
+                    .and_then(|subterm| {
+                        let tp = self.infer_term(&subterm, ctx)?.instantiate_owned(ctx);
+                        ctx.unify_fast(subtype, tp)?;
+                        Ok(subterm)
+                    });
             match result {
                 Ok(subterm) => {
                     size += subterm.size();
@@ -1077,8 +1076,7 @@ impl GP for Lexicon {
                         .read()
                         .expect("poisoned lexicon")
                         .background
-                        .contains(&r)
-                        || rng.gen_bool(params.p_keep)
+                        .contains(&r) || rng.gen_bool(params.p_keep)
                 })
             })
             .collect()
diff --git a/src/trs/parser.rs b/src/trs/parser.rs
index 746c2fb..0eba830 100644
--- a/src/trs/parser.rs
+++ b/src/trs/parser.rs
@@ -346,8 +346,7 @@ mod tests {
             &mut sig,
             &mut vars,
             &mut ops,
-        )
-        .unwrap();
+        ).unwrap();
         assert_eq!(a.display(), "SUCC");
         assert_eq!(s.to_string(), "int → int");
     }
@@ -389,9 +388,8 @@ mod tests {
             vec![],
             false,
             TypeContext::default(),
-        )
-        .unwrap()
-        .1;
+        ).unwrap()
+            .1;
         let mut ctx = lex.0.read().expect("poisoned lexicon").ctx.clone();

         let res = typed_rule("SUCC(x_) = ZERO", &mut lex, &mut ctx);
@@ -409,9 +407,8 @@ mod tests {
             vec![],
             false,
             TypeContext::default(),
-        )
-        .unwrap()
-        .1;
+        ).unwrap()
+            .1;
         let mut ctx = lex.0.read().expect("poisoned lexicon").ctx.clone();
         let res = trs(
             CompleteStr("PLUS(ZERO x_) = ZERO; PLUS(SUCC(x_) y_) = SUCC(PLUS(x_ y_));"),
@@ -436,9 +433,8 @@ mod tests {
             vec![],
             false,
             TypeContext::default(),
-        )
-        .unwrap()
-        .1;
+        ).unwrap()
+            .1;
         let mut ctx = lex.0.read().expect("poisoned lexicon").ctx.clone();

         let res = typed_context(CompleteStr("PLUS(x_ [!])"), &mut lex, &mut ctx);
@@ -456,9 +452,8 @@ mod tests {
             vec![],
             false,
             TypeContext::default(),
-        )
-        .unwrap()
-        .1;
+        ).unwrap()
+            .1;
         let mut ctx = lex.0.read().expect("poisoned lexicon").ctx.clone();

         let res = typed_rulecontext(CompleteStr("PLUS(x_ [!]) = ZERO"), &mut lex, &mut ctx);
@@ -476,9 +471,8 @@ mod tests {
             vec![],
             false,
             TypeContext::default(),
-        )
-        .unwrap()
-        .1;
+        ).unwrap()
+            .1;
         let mut ctx = lex.0.read().expect("poisoned lexicon").ctx.clone();
         let res = templates(
             CompleteStr("PLUS(x_ [!]) = ZERO; [!] = SUCC(ZERO);"),
diff --git a/src/trs/rewrite.rs b/src/trs/rewrite.rs
index 0491d1f..b2a4da8 100644
--- a/src/trs/rewrite.rs
+++ b/src/trs/rewrite.rs
@@ -12,7 +12,7 @@
 use rand::Rng;
 use std::f64::NEG_INFINITY;
 use std::fmt;
 use term_rewriting::trace::Trace;
-use term_rewriting::{Rule, RuleContext, Strategy as RewriteStrategy, TRS as UntypedTRS};
+use term_rewriting::{Rule, RuleContext, Strategy as RewriteStrategy, Term, TRS as UntypedTRS};

 use super::{Lexicon, ModelParams, SampleError, TypeError};
@@ -279,13 +279,9 @@ impl TRS {
     ) -> Result<TRS, SampleError> {
         let mut trs = self.clone();
         let context = sample_iter(rng, contexts, 1)?[0].clone();
-        let rule = trs.lex.sample_rule_from_context(
-            context,
-            &mut trs.ctx,
-            atom_weights,
-            true,
-            max_size,
-        )?;
+        let rule =
+            trs.lex
+                .sample_rule_from_context(context, &mut trs.ctx, atom_weights, true, max_size)?;
         trs.lex
             .0
             .write()
@@ -304,11 +300,112 @@
             Err(SampleError::OptionsExhausted)
         } else {
             let mut trs = self.clone();
-            trs.utrs
-                .remove_clauses(sample_iter(rng, deletable, 1)?[0])?;
+            trs.utrs.remove_clauses(sample_iter(rng, deletable, 1)?[0])?;
             Ok(trs)
         }
     }
+    /// Replaces one subterm with another subterm within a larger Term.
+    fn replace_term_helper(term: &Term, replace: &Term, new: Term) -> Term {
+        if Term::alpha(replace, term).is_some() {
+            let unwrapped_sub = Term::alpha(replace, term).unwrap();
+            return new.substitute(&unwrapped_sub);
+        } else if !term.args().is_empty() {
+            match *term {
+                Term::Variable(ref _var) => {
+                    return term.clone();
+                }
+                Term::Application { ref op, args: _ } => {
+                    let mut args = term.args().clone();
+                    for idx in 0..args.len() {
+                        args[idx] = TRS::replace_term_helper(&args[idx], replace, new.clone());
+                    }
+                    let op = op.clone();
+                    return Term::Application { op, args };
+                }
+            }
+        }
+        return term.clone();
+    }
+    /// Replaces one subterm with another subterm in a given Rule.
+    fn replace_term_in_rule_helper(rule: &Rule, t: &Term, v: Term) -> Option<Rule> {
+        let r = rule.clone();
+        let lhs = TRS::replace_term_helper(&r.lhs, t, v.clone());
+        let mut rhs: Vec<Term> = vec![];
+        for idx in 0..r.rhs.len() {
+            rhs.push(TRS::replace_term_helper(&r.rhs[idx].clone(), t, v.clone()));
+        }
+        Rule::new(lhs, rhs)
+    }
+    // Helper for routinization.
+    // TODO: fix rule lhs before inserting.
+    fn inverse_evaluation_helper<R: Rng>(rule: &Rule, t: &Term, rng: &mut R) -> Term {
+        let rhs_idx = rng.gen_range(0, rule.rhs.len());
+        TRS::replace_term_helper(t, &rule.rhs[rhs_idx], rule.lhs.clone())
+    }
+    /// Given two rules, attempts to apply one rule inversely to the other.
+    fn inverse_evaluate_rule_helper<R: Rng>(rule: &Rule, r: &Rule, rng: &mut R) -> Option<Rule> {
+        let lhs = TRS::inverse_evaluation_helper(rule, &r.lhs, rng);
+        let mut rhs: Vec<Term> = vec![];
+        for idx in 0..r.rhs.len() {
+            rhs.push(TRS::inverse_evaluation_helper(rule, &r.rhs[idx], rng));
+        }
+        Rule::new(lhs, rhs)
+    }
+
+    /// Extracts a single rule from the TRS, selecting only one rhs.
+    fn sample_rule<R: Rng>(&self, rng: &mut R) -> Result<Rule, SampleError> {
+        let mut trs = self.clone();
+        let num_rules = self.len();
+        let num_background = self
+            .lex
+            .0
+            .read()
+            .expect("poisoned lexicon")
+            .background
+            .len();
+        let ref_idx: usize = rng.gen_range(0, num_rules);
+        let mut ref_rule = trs.utrs.rules[ref_idx].clone();
+        let rhs_idx = rng.gen_range(0, ref_rule.rhs.len());
+        let temp_rule = Rule::new(ref_rule.lhs, vec![ref_rule.rhs[rhs_idx].clone()]);
+        if temp_rule.is_none() {
+            return Err(SampleError::OptionsExhausted);
+        }
+        ref_rule = temp_rule.unwrap();
+        if ref_rule.lhs.variables().len() < ref_rule.rhs[0].variables().len() {
+            return Err(SampleError::OptionsExhausted);
+        }
+        Ok(ref_rule)
+    }
+    /// Selects two Rules from the TRS at random and attempts to inverse evaluate one rule on the
+    /// other; if it succeeds, it takes the new rule and inserts it immediately after the background.
+    pub fn inverse_evaluate<R: Rng>(&self, rng: &mut R) -> Result<TRS, SampleError> {
+        let mut trs = self.clone();
+        let num_rules = self.len();
+        let num_background = self
+            .lex
+            .0
+            .read()
+            .expect("poisoned lexicon")
+            .background
+            .len();
+        if num_background >= num_rules - 1 {
+            return Err(SampleError::OptionsExhausted);
+        }
+        let target_idx: usize = rng.gen_range(num_background, num_rules);
+        let ref_rule = trs.sample_rule(rng)?;
+        let new_rule =
+            TRS::inverse_evaluate_rule_helper(&ref_rule, &trs.utrs.rules[target_idx], rng);
+        if new_rule.is_none() {
+            return Err(SampleError::OptionsExhausted);
+        }
+        trs.utrs
+            .remove_clauses(&ref_rule)
+            .expect("removing old rule");
+        trs.utrs
+            .insert_clauses(&new_rule.unwrap())
+            .expect("inserting new rule");
+        Ok(trs)
+    }
 }
 impl fmt::Display for TRS {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
@@ -331,3 +428,68 @@
         write!(f, "{}", trs_str)
     }
 }
+#[cfg(test)]
+mod tests {
+    use super::*;
+    extern crate polytype;
+    extern crate rand;
+    extern crate term_rewriting;
+    use rand::thread_rng;
+    use term_rewriting::{parse_rule, parse_term, Signature};
+
+    #[test]
+    fn replace_term_helper_test() {
+        let mut sig = Signature::default();
+        let term = parse_term(&mut sig, "A(y_ B(C) D)").expect("parse of A(y_ B(C) D)");
+        let t = parse_term(&mut sig, "B(C)").expect("parse of B(C)");
+        let v = parse_term(&mut sig, "x_").expect("parse of x_");
+
+        let new_term = TRS::replace_term_helper(&term, &t, v);
+        assert_eq!(new_term.display(), "A(y_ x_ D)");
+    }
+
+    #[test]
+    fn replace_term_in_rule_helper_test() {
+        let mut sig = Signature::default();
+        let rule =
+            parse_rule(&mut sig, "A(y_ B(C) D) = B(C)").expect("parse of A(y_ B(C) D) = B(C)");
+        let t = parse_term(&mut sig, "B(C)").expect("parse of B(C)");
+        let v = parse_term(&mut sig, "x_").expect("parse of x_");
+
+        let new_rule = TRS::replace_term_in_rule_helper(&rule, &t, v);
+        if new_rule.is_none() {
+            assert!(false);
+        } else {
+            let rule = new_rule.unwrap();
+            assert_eq!(rule.display(), "A(y_ x_ D) = x_");
+        }
+    }
+
+    #[test]
+    fn inverse_evaluation_helper_test() {
+        let mut sig = Signature::default();
+
+        let t = parse_term(&mut sig, "A(B(x_ x_))").expect("parse of A(B(x_ x_))");
y_)").expect("parse of C(y_) = B(y_ y_)"); + let mut rng = thread_rng(); + + let result = TRS::inverse_evaluation_helper(&r, &t, &mut rng); + + assert_eq!(result.display(), "A(C(x_))"); + } + + #[test] + fn inverse_evluate_rule_helper_test() { + let mut sig = Signature::default(); + + let r = parse_rule(&mut sig, "A(B(x_ x_)) = D B(x_ x_)") + .expect("parse of A(B(x_ x_)) = D B(x_ x_)"); + let rule = parse_rule(&mut sig, "C(y_) = B(y_ y_)").expect("parse of C(y_) = B(y_ y_)"); + let mut rng = thread_rng(); + + let result = TRS::inverse_evaluate_rule_helper(&rule, &r, &mut rng); + + assert_eq!(result.unwrap().pretty(), "A(C(x_)) = D C(x_)"); + } + +}