diff --git a/tp9/apl1test.py b/tp9/apl1test.py
new file mode 100644
index 0000000000000000000000000000000000000000..8533ccaca5d99e7cfb83d6d86aa9334bb6a73a40
--- /dev/null
+++ b/tp9/apl1test.py
@@ -0,0 +1,106 @@
+from thonnycontrib.backend.evaluator import Evaluator
+import thonnycontrib.backend.l1test_backend
+from thonny.plugins.cpython_backend.cp_back import MainCPythonBackend
+from thonnycontrib.backend.doctest_parser import ExampleWithExpected
+from thonnycontrib.backend.verdicts.ExceptionVerdict import ExceptionVerdict
+
+
+class MockBackend(MainCPythonBackend):
+    """
+    Fake backend: never talks to the Thonny GUI and silently drops
+    every message the evaluator asks it to send.
+    """
+    def __init__(self):
+        # Deliberately skip MainCPythonBackend.__init__ (no socket, no REPL).
+        ...
+
+    def send_message(self, msg) -> None:
+        # Discard messages that would normally reach the Thonny front end.
+        ...
+
+# Register the fake backend so the evaluator can use it.
+thonnycontrib.backend.l1test_backend.BACKEND = MockBackend()
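+# With the fake backend in place, the evaluator can run headless: anything
+# it reports through the backend is discarded by the mock above.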
+
+def l1test_to_org(filename: str, source: str=""):
+    """
+    Return an org-mode summary of the tests present in the file `filename`.
+    If `source` is empty, the source code is read from `filename`.
+    """
+    abstract = {'total': 0,
+                'success': 0,
+                'failures': 0,
+                'errors': 0,
+                'empty': 0}
+
+    if source == "":
+        with open(filename, 'rt') as fin:
+            source = fin.read()
+    evaluator = Evaluator(filename=filename,
+                          source=source)
+    tests = evaluator.evaluate()
+    n = len(tests)
+    abstract['total'] = n
+    res = ""
+    for test in tests:
+        examples = test.get_examples()
+        res_examples = ""
+        nb_test, nb_test_ok = 0, 0
+        empty = True
+        for example in examples:
+            verdict = test.get_verdict_from_example(example)
+            if isinstance(example, ExampleWithExpected):
+                nb_test += 1
+                empty = False
+                if verdict.isSuccess():
+                    nb_test_ok += 1
+                    abstract['success'] += 1
+                elif isinstance(verdict, ExceptionVerdict):
+                    # A test whose execution raises is counted as an error,
+                    # not as an ordinary failure: one verdict, one counter.
+                    abstract['errors'] += 1
+                else:
+                    abstract['failures'] += 1
+            elif isinstance(verdict, ExceptionVerdict):
+                # Exception raised by a statement without expected value.
+                abstract['errors'] += 1
+                empty = False
+            res_examples += f"** {verdict}\n\n"
+            if not verdict.isSuccess():
+                res_examples += f"   {verdict.get_details()}\n\n"
+        if not empty:
+            res += f"* {test.get_name()} ~ {nb_test_ok}/{nb_test} réussis\n\n"
+        else:
+            abstract['empty'] += 1
+            res += f"* {test.get_name()}\n\n Aucun test trouvé !\n\n"
+        res += res_examples
+    res = f"Tests exécutés : {abstract['total']}\nSuccès: {abstract['success']}, \
+Echecs: {abstract['failures']}, Erreurs: {abstract['errors']}, \
+Vide: {abstract['empty']}\n\n" + res
+    return res
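+
+# A quick sanity check (a sketch; `tp9/exemple.py` stands for any file
+# containing l1test-style doctests):
+#
+#     print(l1test_to_org("tp9/exemple.py"))
+#
+# The counters come first, then one `*` heading per tested function and
+# one `**` line per verdict.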
+
+
+def testmod(modulename: str):
+    """
+    Mimic the `doctest.testmod` function for the module whose
+    source file is `modulename`.
+    """
+    print(l1test_to_org(modulename))
+
+
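+if __name__ == "__main__":
+    # Minimal command-line entry point (a convenience sketch): run the
+    # l1test tests of each file passed as argument.
+    import sys
+    for name in sys.argv[1:]:
+        testmod(name)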