Commit 77f5ad7c authored by Frontull Samuel

restructured

parent 99e9c01b
@@ -16,9 +16,14 @@ opam install dune
That's it. To run the `main.ml` file type
```
dune exec bin/main.exe
dune exec bin/main.exe max_depth simultaneous dot term_str
```
- max_depth: the maximum number of compositions used when computing the legal paths
- simultaneous: `true` or `false`, denotes whether simultaneous substitution is applied
- dot: `true` or `false`, denotes whether a dot file should be generated
- term_str: a term in quotes in the format `UT = (UT) | /x ... y.UT | UT ... UT | x`, e.g. `"(/x.x z) y"` (see the example invocation below)
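For example, with illustrative values for the first three arguments, the following computes the legal paths up to depth 10, applies simultaneous substitution and skips the dot output:
```
dune exec bin/main.exe 10 true false "(/x.x z) y"
```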
## Library
The `lib` folder contains the source code.
@@ -31,9 +36,21 @@ The `lib` folder contains the source code.
- `underlined.ml` - Underline Lambda Calculus
- `untyped.ml` - Untyped Lambda Calculus
## Exponentiation of Church numerals
With the two scripts `naive.ml` and `nocapture.ml` you can compare the naive capture-permitting reduction of Church numerals to the safe capture-avoiding variant.
```
dune exec exp/naive.exe 2 2
```
```
dune exec exp/nocapture.exe 2 2
```
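The difference matters because naive substitution can capture free variables. The following is a minimal, self-contained OCaml sketch of the problem; it is independent of the `Core` module in `lib`, and the type and function names are made up for this illustration:
```ocaml
(* A toy lambda-term type, not the project's representation. *)
type term =
  | Var of string
  | Lam of string * term
  | App of term * term

(* Naive, capture-permitting substitution of [s] for [x]:
   binders are never renamed. *)
let rec subst_naive x s = function
  | Var y -> if y = x then s else Var y
  | Lam (y, b) -> if y = x then Lam (y, b) else Lam (y, subst_naive x s b)
  | App (f, a) -> App (subst_naive x s f, subst_naive x s a)

(* Substituting the free variable [y] for [x] in  /y. x  yields  /y. y:
   the substituted [y] is captured by the binder, so the result is no
   longer alpha-equivalent to the intended term. A capture-avoiding
   substitution would first rename the binder to a fresh name. *)
let captured = subst_naive "x" (Var "y") (Lam ("y", Var "x"))
```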
## DOT files
To view the generated dot-files you can:
To view the dot-files you can:
- use [xdot](https://formulae.brew.sh/formula/xdot), an interactive viewer for graphs written in Graphviz's dot language, or
- install [graphviz](https://graphviz.org/) and convert them to e.g. a pdf file with the following command (for a single file):
```
......
@@ -6,33 +6,25 @@ let p_term term reduction scc_paths =
let tree_of_string s = Grammar.input Lexer.token (Lexing.from_string s)
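(* [string_of_command] runs the external [stlc-infer.py] script on the
   pretty-printed term and returns the first line of its output
   (the typability verdict, e.g. "Typable."). *)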
let string_of_command termstrparsed =
let tmp_file = Filename.temp_file "" ".txt" in
let _ =
Sys.command @@ "python3 stlc-infer.py '" ^ termstrparsed ^ "' >" ^ tmp_file
in
let chan = open_in tmp_file in
let s = input_line chan in
close_in chan;
s
let () =
if Array.length Sys.argv < 5 then
failwith "Usage: dune exec bin/main.exe max_depth simultaneous term_str"
failwith "Usage: dune exec bin/main.exe max_depth simultaneous dot term_str"
else
let term =
try tree_of_string (Sys.argv.(3) ^ "\n")
try tree_of_string (Sys.argv.(4) ^ "\n")
with _ -> failwith "Syntax error"
in
let max_depth = int_of_string Sys.argv.(1) in
let simultaneous = bool_of_string Sys.argv.(2) in
let generate_dot = bool_of_string Sys.argv.(3) in
let scc_analysis = Core.scc term max_depth simultaneous in
let scc_paths = snd scc_analysis in
let vc_terminated = fst (fst scc_analysis) in
let typable =
string_of_command (Core.a_term_to_string (Core.make_closed term))
let closed_term =
Core.a_term_to_string (Core.make_closed term)
in
let normalizing = typable = "Typable." in
let normalizing = false in
(* typable = "Typable." in *)
(* snd (fst scc_analysis) in *)
let reduction =
if normalizing then Core.sn_reduce_to_nf Core.casubst term
@@ -65,14 +57,15 @@ let () =
in
let dot = p_term term reduction_str scc_paths in
Printf.printf
"- vc: %s\n\
- typable: %s\n\
"- vc: %s\n\
- closed_term: %s\n\
- max_depth: %d\n\
- order_consistent: %s\n\
- is_safe: %s\n\
- normal form: %s\n\
- safe_naming: %s\n\
- is_linear: %s\n\
- term: %s" uvc typable max_depth ord_con is_safe
- dot: %s" uvc closed_term max_depth ord_con is_safe
(List.hd (List.rev reduction_str))
safe_naming is_linear dot
safe_naming is_linear dot;
if generate_dot then Dot.export_term_ctx_dot [] term "dot/" "out"
import os
import json
import yaml
import subprocess
import progressbar
from time import sleep
N = 10000
bar = progressbar.ProgressBar(maxval=N, \
widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
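# check() generates one random term with bnfgen, skips it if it is longer than
# 150 characters, analyses it with main.exe and returns the term if it shows an
# unavoidable variable clash, is typable and has a fresh name ("_") in its
# normal form; otherwise it returns None.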
def check():
term = subprocess.check_output('bnfgen --separator "" grammar.bnf', shell=True, timeout=20)
term = term.decode('utf-8').strip()
if len(term) > 150:
return
res = subprocess.check_output('../main.exe 50 false "' + term + '"', shell=True, timeout=20)
dct = yaml.safe_load(res)
vc = dct[0]['vc'] == "Yes, unavoidable"
vc_avoidable = dct[0]['vc'] == "Yes, but avoidable"
typable = dct[1]['typable'] == "Typable."
nf = dct[5]['normal form']
safenaming = dct[6]['safe_naming'] == "Yes."
fresh = "_" in nf
if vc and fresh and typable:
print(term)
return term
# if vc_avoidable and typable:
# print("Avoidable: " + term)
# return term
else:
return None
bar.start()
terms = []
for i in range(0,N):
t = check()
if t != None:
terms.append(t)
bar.update(i+1)
bar.finish()
print(terms)
# bnfgen
Script that generates random terms according to `grammar.bnf`
## Dependencies
```
opam install bnfgen
```
## Execution
Run the following command in the `bnf` directory.
```
bnfgen --separator "" grammar.bnf
```
@@ -11,14 +11,6 @@ let generate_curch_numeral n = "/f x." ^ _generate_church_numeral n
let tree_of_string s = Grammar.input Lexer.token (Lexing.from_string s)
let con_a n = ((n * n) - n) / 2
let con_b n = n * ((3 * n) - 5) / 2
let con_c n = (n - 1) * (n - 1)
let predict_conversions b n = (b * b * con_a n) - (b * con_b n) + con_c n
let () =
if Array.length Sys.argv < 3 then failwith "Usage: dune exec bin/main.exe b n"
else
@@ -42,8 +34,7 @@ let () =
Printf.printf "Execution time (%d): %f seconds\n"
(String.length (Core.a_term_to_string s))
diff;
Printf.printf "Alpha conversions: %d (%d)\n" (c2 - c1)
(predict_conversions b n);
Printf.printf "Alpha conversions: %d\n" (c2 - c1);
Printf.printf "Beta conversions: %d\n" (bc2 - bc1)
(* Printf.printf "%s\n" (Core.a_term_to_string (List.hd (List.rev reduction))); *)
@@ -534,7 +534,8 @@ let composition newpaths acc nodes bedges =
app_composition nodes x wbps)
acc
in
L.filter is_legal (linear_list (L.flatten comp))
(* L.filter is_legal (linear_list (L.flatten comp)) *)
linear_list (L.flatten comp)
let rec similar_redex wbp = function
| x :: xs ->
@@ -585,7 +586,7 @@ let well_balanced_paths term =
| _ -> Empty)
apps
in
let max_compositions = 10 in
let max_compositions = 12 in
let compose = _well_balanced_paths acc acc nodes bedges max_compositions in
(* let applam = L.filter (fun p -> islam_node (last_wbp p)) (snd compose) in *)
(fst compose, snd compose)
@@ -825,6 +826,20 @@ let rec default_names_r term pos =
let default_names term = default_names_r term "e"
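(* [subterm_at term pos] returns the subterm at position [pos]. Positions are
   strings rooted at "e"; descending into a lambda body or into the left-hand
   side of an application appends "0", and descending into the right-hand side
   of an application appends "1". *)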
let rec _subterm_at term pos current =
match term with
| AVar (s, t) -> term
| ALam ((xs, e), t) ->
if pos = current then term else
_subterm_at e pos (current ^ "0")
| AApp (e1, e2, t) ->
if pos = current then term
else if is_prefix (current ^ "0") pos then
_subterm_at e1 pos (current ^ "0")
else _subterm_at e2 pos (current ^ "1")
let subterm_at term pos = _subterm_at term pos "e"
let beta subst term =
(* Printf.printf "beta: %s\n" (a_term_to_string term); *)
beta_counter := !beta_counter + 1;
@@ -965,6 +980,14 @@ let rec nf subst term =
let new_term = reduce_lo subst term in
if new_term = term then term else nf subst new_term
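(* [nf_arr] behaves like [nf] but also records every intermediate reduct,
   returning the whole reduction sequence produced by [reduce_lo] rather than
   only the final normal form. *)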
let rec _nf_arr acc subst term =
(* Printf.printf "%d %s\n" (size term) (a_term_to_string term); *)
let new_term = reduce_lo subst term in
if new_term = term then acc else _nf_arr (acc @ [new_term]) subst new_term
let rec nf_arr subst term =
_nf_arr [] subst term
let rec whnf subst term =
(* Printf.printf "%d %s\n" (size term) (a_term_to_string term); *)
let new_term = reduce_lazy subst term in
......
@@ -232,7 +232,7 @@ let export_term_ctx_dot ctx tterm dir name =
let scc_paths = scc tterm 10 true in
let reduction = L.map Core.a_term_to_string (reduce_to_nf tterm 10) in
let oc = open_out fname in
Printf.printf "Generating file %s..." fname;
(* Printf.printf "Generating file %s..." fname; *)
fprintf oc "%s\n" (term_ctx_to_dot ctx tterm reduction (snd scc_paths) name);
close_out oc;
Printf.printf "done!\t\n"
(* Printf.printf "done!\t\n" *)
# stylish-haskell configuration file
# ==================================
# The stylish-haskell tool is mainly configured by specifying steps. These steps
# are a list, so they have an order, and one specific step may appear more than
# once (if needed). Each file is processed by these steps in the given order.
steps:
# Convert some ASCII sequences to their Unicode equivalents. This is disabled
# by default.
# - unicode_syntax:
# # In order to make this work, we also need to insert the UnicodeSyntax
# # language pragma. If this flag is set to true, we insert it when it's
# # not already present. You may want to disable it if you configure
# # language extensions using some other method than pragmas. Default:
# # true.
# add_language_pragma: true
# Align the right hand side of some elements. This is quite conservative
# and only applies to statements where each element occupies a single
# line. All default to true.
- simple_align:
cases: true
top_level_patterns: true
records: true
# Import cleanup
- imports:
# There are different ways we can align names and lists.
#
# - global: Align the import names and import list throughout the entire
# file.
#
# - file: Like global, but don't add padding when there are no qualified
# imports in the file.
#
# - group: Only align the imports per group (a group is formed by adjacent
# import lines).
#
# - none: Do not perform any alignment.
#
# Default: global.
align: global
# The following options affect only import list alignment.
#
# List align has the following options:
#
# - after_alias: Import list is aligned with end of import including
# 'as' and 'hiding' keywords.
#
# > import qualified Data.List as List (concat, foldl, foldr, head,
# > init, last, length)
#
# - with_alias: Import list is aligned with start of alias or hiding.
#
# > import qualified Data.List as List (concat, foldl, foldr, head,
# > init, last, length)
#
# - with_module_name: Import list is aligned `list_padding` spaces after
# the module name.
#
# > import qualified Data.List as List (concat, foldl, foldr, head,
# init, last, length)
#
# This is mainly intended for use with `pad_module_names: false`.
#
# > import qualified Data.List as List (concat, foldl, foldr, head,
# init, last, length, scanl, scanr, take, drop,
# sort, nub)
#
# - new_line: Import list always starts on a new line.
#
# > import qualified Data.List as List
# > (concat, foldl, foldr, head, init, last, length)
#
# Default: after_alias
list_align: after_alias
# Right-pad the module names to align imports in a group:
#
# - true: a little more readable
#
# > import qualified Data.List as List (concat, foldl, foldr,
# > init, last, length)
# > import qualified Data.List.Extra as List (concat, foldl, foldr,
# > init, last, length)
#
# - false: diff-safe
#
# > import qualified Data.List as List (concat, foldl, foldr, init,
# > last, length)
# > import qualified Data.List.Extra as List (concat, foldl, foldr,
# > init, last, length)
#
# Default: true
pad_module_names: true
# Long list align style takes effect when import is too long. This is
# determined by 'columns' setting.
#
# - inline: This option will put as many specs on the same line as possible.
#
# - new_line: Import list will start on new line.
#
# - new_line_multiline: Import list will start on new line when it's
# short enough to fit to single line. Otherwise it'll be multiline.
#
# - multiline: One line per import list entry.
# Type with constructor list acts like single import.
#
# > import qualified Data.Map as M
# > ( empty
# > , singleton
# > , ...
# > , delete
# > )
#
# Default: inline
long_list_align: inline
# Align empty list (importing instances)
#
# Empty list align has the following options
#
# - inherit: inherit list_align setting
#
# - right_after: () is right after the module name:
#
# > import Vector.Instances ()
#
# Default: inherit
empty_list_align: inherit
# List padding determines indentation of import list on lines after import.
# This option affects 'long_list_align'.
#
# - <integer>: constant value
#
# - module_name: align under start of module name.
# Useful for 'file' and 'group' align settings.
#
# Default: 4
list_padding: 4
# Separate lists option affects formatting of import list for type
# or class. The only difference is single space between type and list
# of constructors, selectors and class functions.
#
# - true: There is a single space between the Foldable type and the list of its
# functions.
#
# > import Data.Foldable (Foldable (fold, foldl, foldMap))
#
# - false: There is no space between the Foldable type and the list of its
# functions.
#
# > import Data.Foldable (Foldable(fold, foldl, foldMap))
#
# Default: true
separate_lists: true
# Space surround option affects formatting of import lists on a single
# line. The only difference is single space after the initial
# parenthesis and a single space before the terminal parenthesis.
#
# - true: There is single space associated with the enclosing
# parenthesis.
#
# > import Data.Foo ( foo )
#
# - false: There is no space associated with the enclosing parenthesis
#
# > import Data.Foo (foo)
#
# Default: false
space_surround: false
# Language pragmas
- language_pragmas:
# We can generate different styles of language pragma lists.
#
# - vertical: Vertical-spaced language pragmas, one per line.
#
# - compact: A more compact style.
#
# - compact_line: Similar to compact, but wrap each line with
# `{-#LANGUAGE #-}'.
#
# Default: vertical.
style: vertical
# Align affects alignment of closing pragma brackets.
#
# - true: Brackets are aligned in same column.
#
# - false: Brackets are not aligned together. There is only one space
# between actual import and closing bracket.
#
# Default: true
align: true
# stylish-haskell can detect redundancy of some language pragmas. If this
# is set to true, it will remove those redundant pragmas. Default: true.
remove_redundant: true
# Replace tabs by spaces. This is disabled by default.
# - tabs:
# # Number of spaces to use for each tab. Default: 8, as specified by the
# # Haskell report.
# spaces: 8
# Remove trailing whitespace
- trailing_whitespace: {}
# Squash multiple spaces between the left and right hand sides of some
# elements into single spaces. Basically, this undoes the effect of
# simple_align but is a bit less conservative.
# - squash: {}
# A common setting is the number of columns (parts of) code will be wrapped
# to. Different steps take this into account. Default: 80.
columns: 80
# By default, line endings are converted according to the OS. You can override
# preferred format here.
#
# - native: Native newline format. CRLF on Windows, LF on other OSes.
#
# - lf: Convert to LF ("\n").
#
# - crlf: Convert to CRLF ("\r\n").
#
# Default: native.
newline: native
# Sometimes, language extensions are specified in a cabal file or from the
# command line instead of using language pragmas in the file. stylish-haskell
# needs to be aware of these, so it can parse the file correctly.
#
# No language extensions are enabled by default.
# language_extensions:
# - TemplateHaskell
# - QuasiQuotes
# Attempt to find the cabal file in ancestors of the current directory, and
# parse options (currently only language extensions) from that.
#
# Default: true
cabal: true
{-# LANGUAGE RankNTypes #-}
{- Church Encodings -
- - - - - - - - - - -
- By Risto Stevcev -}
module Church where
import Prelude hiding (succ, pred, and, or, not, exp, div, head, tail)
{- Church Logical Operators -
- - - - - - - - - - - - - - -}
type ChurchBool = forall a. a -> a -> a
-- Church Boolean (True)
-- λt.λf.t
true :: ChurchBool
true = \t -> \f -> t
-- Church Boolean (False)
-- λt.λf.f
false :: ChurchBool
false = \t -> \f -> f
-- Church AND
-- λa.λb.a b false
and :: ChurchBool -> ChurchBool -> ChurchBool
and = \a -> \b -> a b false
-- Church OR
-- λa.λb.a true b
or :: ChurchBool -> ChurchBool -> ChurchBool
or = \a -> \b -> a true b
-- Church NOT
-- λp.λa.λb.p b a
not :: ChurchBool -> ChurchBool
not = \p -> \a -> \b -> p b a
-- Church XOR
-- λa.λb.a (not b) b
xor :: ChurchBool -> ChurchBool -> ChurchBool
xor = \a -> \b -> a (not b) b
-- Convert Church Boolean to Haskell Bool
-- (λa.λb.λc.c a b) True False
unchurch_bool :: (Bool -> Bool -> a) -> a
unchurch_bool = (\a -> \b -> \c -> c a b) True False
{- Church Natural Numbers (n ∈ ℕ) -
- - - - - - - - - - - - - - - - - -}
-- Rolling/unrolling the ChurchNum is required for Church subtraction
-- The type system isn't powerful enough to handle it otherwise.
--
type ChurchNum = forall a. (a -> a) -> a -> a
newtype Church = Church { unChurch :: ChurchNum }
-- Church Numeral: 0
-- λf.λx.x
zero :: Church
zero = Church $ \f -> \x -> x
-- Church Numeral: 1
-- λf.λx.f x
one :: Church
one = Church $ \f -> \x -> f x
-- Church Numeral: 2
-- λf.λx.f (f x)
two :: Church
two = Church $ \f -> \x -> f (f x)
-- Church Numeral: 3
-- λf.λx.f (f (f x))
three :: Church
three = Church $ \f -> \x -> f (f (f x))
-- Church Numeral: n (where n ∈ ℕ)
-- num 0 = λf.λx.x
-- num n = λf.λx.f (num (n-1) f x)
num :: Integer -> Church
num 0 = Church $ \f -> \x -> x
num n = Church $ \f -> \x -> f ((unChurch $ num (n-1)) f x)
-- Convert Church Numeral (n ∈ ℕ) to Haskell Integer
-- λa.a (λb.b+1) (0)
unchurch_num :: Church -> Integer
unchurch_num = \a -> unChurch a (\b -> b + 1) (0)
{- Church Conditionals -
- - - - - - - - - - - -}
-- Church Conditional (If/Else)
-- λp.λa.λb.p a b
ifelse :: ChurchBool -> a -> a -> a
ifelse = \p -> \a -> \b -> p a b
{- Church Loops -
- - - - - - - - -}
-- Y Combinator
-- Y = λf.(λx.f (x x)) (λx.f (x x))
--
-- Beta reduction of this gives,
-- Y g = (λf.(λx.f (x x)) (λx.f (x x))) g
-- = (λx.g (x x)) (λx.g (x x))
-- = g((λx.g (x x)) (λx.g (x x)))
-- = g (Y g)
y g = g (y g)
-- A non-recursive version of the Y combinator
newtype Mu a = Mu (Mu a -> a)
ynr f = (\h -> h $ Mu h) (\x -> f . (\(Mu g) -> g) x $ x)
{- Church Arithmetic Operators (n ∈ ℕ) -
- - - - - - - - - - - - - - - - - - - -}
-- Church Successor