commit 69d9b56047865209633daaf886e2551027611a61 Author: Nils Hölscher Date: Tue Apr 19 10:56:42 2022 +0200 first commit diff --git a/CacheAnalysisPass/CacheAnalysisPass.cpp b/CacheAnalysisPass/CacheAnalysisPass.cpp new file mode 100644 index 0000000..584b548 --- /dev/null +++ b/CacheAnalysisPass/CacheAnalysisPass.cpp @@ -0,0 +1,320 @@ +//============================================================================= +// FILE: +// CacheAnalysisPass.cpp +// +// DESCRIPTION: +// Visits all functions in a module, prints their names and the number of +// arguments via stderr. Strictly speaking, this is an analysis pass (i.e. +// the functions are not modified). However, in order to keep things simple +// there's no 'print' method here (every analysis pass should implement it). +// +// USAGE: +// New PM: +// opt -load-pass-plugin=libCacheAnalysisPass.dylib -passes=lru-misses `\` +// -disable-output +// +// +// License: MIT +//============================================================================= +#include "llvm/ADT/StringRef.h" +#include "llvm/Analysis/LoopInfo.h" +#include "llvm/Analysis/ScalarEvolution.h" +#include "llvm/IR/BasicBlock.h" +#include "llvm/IR/CFG.h" +#include "llvm/IR/Instruction.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/PassManager.h" +#include "llvm/IR/Value.h" +#include "llvm/Pass.h" +#include "llvm/Passes/PassBuilder.h" +#include "llvm/Passes/PassPlugin.h" + +#include +#include +#include +#include + +#include "../include/AbstractCache.h" +#include "../include/CacheType.h" + +using namespace llvm; + +//----------------------------------------------------------------------------- +// CacheAnalysisPass implementation +//----------------------------------------------------------------------------- +// No need to expose the internals of the pass to the outside world - keep +// everything in an anonymous namespace. +namespace { + +std::string typeToName(Type::TypeID Id) { + switch (Id) { + case Type::TypeID::ArrayTyID: + return "ArrayTy"; + case Type::TypeID::BFloatTyID: + return "BFloatTy"; + case Type::TypeID::FloatTyID: + return "FloatTy"; + case Type::TypeID::DoubleTyID: + return "DoubleTy"; + case Type::TypeID::FixedVectorTyID: + return "FixedVectorTy"; + case Type::TypeID::FP128TyID: + return "FP128Ty"; + case Type::TypeID::FunctionTyID: + return "FunctionTy"; + case Type::TypeID::HalfTyID: + return "HalfTy"; + case Type::TypeID::IntegerTyID: + return "IntegerTy"; + case Type::TypeID::LabelTyID: + return "LabelTy"; + case Type::TypeID::MetadataTyID: + return "MetadataTy"; + case Type::TypeID::PointerTyID: + return "PointerTy"; + case Type::TypeID::PPC_FP128TyID: + return "PPC_FP128Ty"; + case Type::TypeID::ScalableVectorTyID: + return "ScalableVectorTy"; + case Type::TypeID::StructTyID: + return "StructTy"; + case Type::TypeID::TokenTyID: + return "TokenTy"; + case Type::TypeID::VoidTyID: + return "VoidTy"; + case Type::TypeID::X86_AMXTyID: + return "X86_AMXTy"; + case Type::TypeID::X86_FP80TyID: + return "X86_FP80Ty"; + case Type::TypeID::X86_MMXTyID: + return "X86_MMXTy"; + } + // should not reach here + return nullptr; +} +// New PM implementation + +// TODO: assign Misses to CacheState +// TODO: Find longest Path, LPsolve? +// TODO: Sum up Cache misses over longest path. 
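// Rough flow of the pass below: address_collector() assigns each instruction
// in the module an ascending pseudo-address (starting at 0b100000),
// init_edges() connects those addresses into an interprocedural control-flow
// graph including call and return edges, fillAbstractCache() propagates
// abstract cache states along that graph from the entry point, and
// collectHits()/collectMisses() count the edges whose target address is a
// must-hit resp. a may-miss in the predecessor's abstract state.
// Worked example of the address scheme from include/Address.h
// (Offset = Addr & 0b1, Index = (Addr & 0b11110) >> 1, Tag = Addr >> 5):
// the first pseudo-address 0b100000 (32) has offset 0, set index 0, tag 1.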
+struct CacheAnalysisPass : PassInfoMixin { + + // Development Options + bool PrintAddresses = false; + bool PrintEdges = false; + bool PrintEdgesPost = false; + bool DumpToDot = false; + bool DumpNodes = false; + + // Assume a 4kB Cache + // with 16 Sets, associativity of 4 and Cachelines fitting two + CacheType Cache = CacheType(16, 4, 128); + StringRef EntryPoint = "main"; + unsigned int EntryAddress; + unsigned int AddressCounter = 0b100000; + // assume 8 Bit addressed 64 Bit instructions. + std::map Addr2Value; + std::map Value2Addr; + + AbstractCache AC; + // TODO mark visit ed F's BB's and Inst's + std::map VisitedFunctions; + + unsigned int stringRefToInt(StringRef SR) { + unsigned int Length = SR.size(); + unsigned int ret = 0; + unsigned int Count = 1; + for (char C : SR) { + unsigned int Factor = (unsigned int)pow(10, (Length - Count++)); + switch (C) { + case '0': + break; + case '1': + ret += Factor; + break; + case '2': + ret += 2 * Factor; + break; + case '3': + ret += 3 * Factor; + break; + case '4': + ret += 4 * Factor; + break; + case '5': + ret += 5 * Factor; + break; + case '6': + ret += 6 * Factor; + break; + case '7': + ret += 7 * Factor; + break; + case '8': + ret += 8 * Factor; + break; + case '9': + ret += 9 * Factor; + break; + default: + errs() << "StringRef is not a decimal number"; + }; + } + return ret; + } + + void address_collector(Module &M) { + for (Function &F : M) { + if (F.getName().equals(EntryPoint)) { + EntryAddress = AddressCounter; + if (PrintAddresses) + outs() << "Found main at PseudoAddress: " << EntryAddress << " \n"; + } + unsigned int InstCounter = 0; + for (BasicBlock &BB : F) { + for (Instruction &Inst : BB) { + AC.addEmptyNode(AddressCounter); + Addr2Value[AddressCounter] = &Inst; + Value2Addr[&Inst] = AddressCounter; + AddressCounter += 1; + InstCounter++; + } + } + } + } + + void address_printer(Function &F) { + outs() << "F: " << Value2Addr[&F] << ".\n"; + for (BasicBlock &BB : F) { + outs() << "-BB: " << Value2Addr[&BB] << "\n"; + for (Instruction &Inst : BB) { + outs() << "--InstAddress:" << Value2Addr[&Inst] << "\n"; + } + } + } + + void init_edges(Function &F) { + for (BasicBlock &BB : F) { + // Collect Controll flow in F + for (auto Pred : predecessors(&BB)) { + AC.addEdge(Value2Addr[&Pred->getInstList().back()], + Value2Addr[&BB.getInstList().front()]); + if (PrintEdges) + outs() << Value2Addr[&Pred->getInstList().back()] << " -> " + << Value2Addr[&BB.getInstList().front()] << "\n"; + } + Instruction *PrevInst = nullptr; + for (Instruction &Inst : BB) { + // Collect function Calls in F=main + if (CallInst *Caller = dyn_cast(&Inst)) { + Function *Callee = Caller->getCalledFunction(); + if (PrintEdges) + outs() << "F: " << Callee->getName() << "\n" + << "Inst: " << Caller->getName() << "\n"; + if (Callee != NULL) { + // Add edge on Function Call + AC.addEdge(Value2Addr[&Inst], + Value2Addr[&Callee->getBasicBlockList() + .front() + .getInstList() + .front()]); + // Add edge on Function return + AC.addEdge( + Value2Addr + [&Callee->getBasicBlockList().back().getInstList().back()], + Value2Addr[&Inst]); + + if (PrintEdges) { + // Printing edge on Function Call + outs() << Callee->getName() << ": "; + outs() << Value2Addr[&Inst] << " -> " + << Value2Addr[&Callee->getBasicBlockList() + .front() + .getInstList() + .front()] + << "\n"; + // Printing edge on Function return + outs() << Callee->getName() << ": "; + outs() << Value2Addr[&Callee->getBasicBlockList() + .back() + .getInstList() + .back()] + << " -> " << Value2Addr[&Inst] << 
"\n"; + } + + // Resume CFG construction in called function + if (VisitedFunctions.find(Callee) == VisitedFunctions.end()) { + VisitedFunctions[Callee] = true; + init_edges(*Callee); + } + PrevInst = nullptr; + if (PrintEdges) + outs() << "Back from " << Callee->getName() << "\n"; + } + } + if (PrevInst != nullptr) { + AC.addEdge(Value2Addr[PrevInst], Value2Addr[&Inst]); + if (PrintEdges) + outs() << Value2Addr[PrevInst] << " -> " << Value2Addr[&Inst] + << "\n"; + } + PrevInst = &Inst; + } + } + } + + PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM) { + FunctionAnalysisManager &FAM = + MAM.getResult(M).getManager(); + + address_collector(M); + Function *EntryFunction; + for (Function &F : M.getFunctionList()) { + // Start iterating through CFG from entry point + if (F.getName().equals(EntryPoint)) { + EntryFunction = &F; + init_edges(F); + } + if (PrintAddresses) + address_printer(F); + } + if (PrintEdgesPost) + AC.dumpEdges(); + if (DumpToDot) + AC.dumpDotFile(); + AC.fillAbstractCache(EntryAddress); + if (DumpNodes) + AC.dumpNodes(); + outs() << "MustHits: " << AC.collectHits() << "\n"; + outs() << "MayMisses: " << AC.collectMisses() << "\n"; + return PreservedAnalyses::all(); + } +}; + +} // namespace + +//----------------------------------------------------------------------------- +// New PM Registration +//----------------------------------------------------------------------------- +llvm::PassPluginLibraryInfo getCacheAnalysisPassPluginInfo() { + return {LLVM_PLUGIN_API_VERSION, "CacheAnalysisPass", LLVM_VERSION_STRING, + [](PassBuilder &PB) { + PB.registerPipelineParsingCallback( + [](StringRef Name, ModulePassManager &MPM, + ArrayRef) { + if (Name == "lru-misses") { + MPM.addPass(CacheAnalysisPass()); + return true; // only looks at CFG + } + return false; // Analysis pass. + }); + }}; +} + +// This is the core interface for pass plugins. It guarantees that 'opt' will +// be able to recognize CacheAnalysisPass when added to the pass pipeline on +// the command line, i.e. via '-passes=lru-misses' +extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo +llvmGetPassPluginInfo() { + return getCacheAnalysisPassPluginInfo(); +} diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..7b675b5 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,21 @@ +FROM archlinux + + +# 1. INSTALL DEPENDENCIES +RUN pacman -Syu --noconfirm \ + git \ + cmake \ + ninja \ + gcc \ + llvm \ + clang \ + gdb \ + python-pip \ + fish \ + zsh + +# 2. INSTALL LIT +RUN pip3 install lit + + + diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..2a91933 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2022 Nils Hoelscher + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md new file mode 100644 index 0000000..910b025 --- /dev/null +++ b/README.md @@ -0,0 +1,86 @@ +# RTSA-lab01-CacheAnalysis + +In this lab session you will learn how to implement a LRU cache in abstract representation. +The Goal is to implement an LRU must Join in include/AbstractState.h +The Project can build, tested and Evaluated with the Helper Script. + +## Diclaimer + +This is the first time we provide this exercise. +Should you encounter something you think is a Bug, please let me know, during lab sessions. + +Also keep track of the Repository as I may add more features to the script. + +## Setup + +We recommend using docker and VS Code for setup. +If this is not your preferred Setup, take a look in the Docker file for the dependencies. + +Also we do not support the usage of Windows, Linux is free of charge so get a copy. + +### Setting Docker up: + +1.) install docker and VS Code on your Distribution. + +https://docs.docker.com/get-docker/ + +https://code.visualstudio.com/ + +For this setup you cannot use the OSS version of VS code or the version from Snap, as the remote development extensions will not work. + +2.) We recommend you install the following extensions in vs code + +C/C++, +clangd, +Docker and +Remote Development + +3.) Use the helper script to build and run a Container + + $ ./helper.sh docker + +This will build a docker image and run a Docker container with the current directory mounted. + +4.) Attach VS Code to the container, in the Docker Tab, and start developing + +## Debugging + +When you are using VS Code you can simply use the Debugging Tab, we prepared a debug script for you. + +## Use the Helper script + +### Initial Setup: + + $ ./helper.sh all + +To get a list of what the helper script can do simply type + + $ ./helper.sh + +### Run: +Run the pass on a single test. +fft1 is recommended during development. + + $ ./helper.sh run fft1 + +### Eval: +Runs the Pass on a set of tests and also prints the expected results. +This will be used to measure correctness of you implementation. + + $ ./helper.sh eval + +## Use the Terminal (Obsolete if script is used) + +This section is not needed, fi you are using the script but for the sake of completeness it is provided anyways. + +Initial Setup: + + $ mkdir build + $ cd build + $ cmake -DLT_LLVM_INSTALL_DIR=$LLVM_DIR ../CacheAnalysisPass/ + $ make + $ cd .. + +Run: + + $ opt -load-pass-plugin build/libCacheAnalysisPass.so -passes=lru-misses test/crc.ll \ No newline at end of file diff --git a/helper.sh b/helper.sh new file mode 100755 index 0000000..8dd40a6 --- /dev/null +++ b/helper.sh @@ -0,0 +1,172 @@ +#!/bin/bash + +clean () { + echo "==== Cleaning build folder ====" + rm -rf build/ +} + +config () { + echo "==== Crating build folder ====" + mkdir build + cd build + echo "==== Configuring cmake ====" + cmake -G Ninja -DCMAKE_BUILD_TYPE=Debug -DLT_LLVM_INSTALL_DIR=$LLVM_DIR ../CacheAnalysisPass/ + echo "==== Done! ====" +} + +compile () { + cd build + echo "==== Compiling Project ====" + ninja + cd .. + echo "==== Done! 
====" +} + +run () { + echo "==== Running $1 ====" + opt -load-pass-plugin build/libCacheAnalysisPass.so \ + -passes='lru-misses(function(loop-unroll-and-jam))' \ + test/$1.ll -o /dev/null + #llvm-dis < out.bc > out.ll +} + +allBenchs=( "adpcm" + "bs" + "bsort100" + "cnt" + "compress" + "cover" + "crc" + "dijkstra" + "duff" + "edn" + "expint" + "fdct" + "fft1" + "fibcall" + "fir" + "hello" + "insertsort" + "janne_complex" + "jfdctint" + "lcdnum" + "lms" + "ludcmp" + "matmult" + "minver" + "ndes" + "nsichneu" + "ns" + "prime" + "qsort-exam" + "qurt" + "recursion" + "select" + "sqrt" + "statemate" + "ud" + "whet" +) + +runall () { + for str in ${allBenchs[@]}; do + echo + run $str + done +} + +case $1 in + clean) + clean + ;; + config) + config + ;; + c | compile) + compile + ;; + cr) + compile + if [ $2 ]; then + run $2 + else + echo "==== Please provide name of the test as second argument! ====" + fi + ;; + r | run) + if [ $2 ]; then + run $2 + else + echo "==== Please provide name of the test as second argument! ====" + fi + ;; + ra | runall) + runall + ;; + docker) + docker build -t rtsalab01cacheanalysis:latest . + docker run -i -d -v "$(pwd)"/.:/root:rw --name RTSAlab01 rtsalab01cacheanalysis + ;; + evaluation | eval) + run "fft1" + echo "==== Correct fft1 ====" + echo "MustHits: 16" + echo "MayMisses: 280" + echo + run "bsort100" + echo "==== Correct bsort100 ====" + echo "MustHits: 1" + echo "MayMisses: 41" + echo + run "lms" + echo "==== Correct lms ====" + echo "MustHits: 5" + echo "MayMisses: 288" + echo + run "minver" + echo "==== Correct minver ====" + echo "MustHits: 6" + echo "MayMisses: 224" + echo + run "qsort-exam" + echo "==== Correct qsort-exam ====" + echo "MustHits: 2" + echo "MayMisses: 152" + echo + run "recursion" + echo "==== Correct recursion ====" + echo "MustHits: 8" + echo "MayMisses: 8" + echo + run "select" + echo "==== Correct select ====" + echo "MustHits: 4" + echo "MayMisses: 108" + echo + run "whet" + echo "==== Correct whet ====" + echo "MustHits: 5" + echo "MayMisses: 265" + echo + ;; + a | all) + clean + config + ninja + echo "==== Done! ====" + ;; + *) + if [ $1 ]; then + echo "Unknown argument: $1" + fi + echo "Please provide one of the following arguments:" + echo " clean Deletes the build folder" + echo " config Creates build folder and configures build System" + echo " c | compile Compiles the Project" + echo " a | all Cleans, configures and compiles the project" + echo " r | run [name] Run pass on test/[name] from the test folder" + echo " cr [name] Compile and run pass on test/[name] from the test folder" + echo " ra | runall Run pass on all tests from the test folder" + exit + ;; +esac diff --git a/include/AbstractCache.h b/include/AbstractCache.h new file mode 100644 index 0000000..73cf13a --- /dev/null +++ b/include/AbstractCache.h @@ -0,0 +1,128 @@ + +#ifndef ABSTRACHTCACHESTATE_H +#define ABSTRACHTCACHESTATE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "AbstractState.h" +#include "Address.h" +#include "ConcreteState.h" + +// Forward declarations + +namespace cacheAnaPass { +class AbstractCache; + +} // namespace cacheAnaPass + +class AbstractCache { +public: // everything is public, because IDGAF + // map keys are instruction Addresses. 
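  // Edges[Pre] holds the list of successor addresses reachable from Pre
  // (the interprocedural CFG built by the pass); Nodes[Addr] holds the
  // abstract cache state attached to the instruction at pseudo-address Addr.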
+ std::map> Edges; + std::map Nodes; + + AbstractCache() {} + + /** + * @brief Add an Edge to the Abstract Cache + * + * @param Pre Predecessor Address + * @param Suc Successor Address + */ + void addEdge(unsigned int Pre, unsigned int Suc) { + Edges[Pre].push_back(Suc); + Nodes[Pre].Successors.push_back(Suc); + Nodes[Suc].Predecessors.push_back(Pre); + } + + void addEmptyNode(unsigned int NodeAddr) { + Nodes[NodeAddr] = AbstractState(NodeAddr); + } + + void fillAbstractCache(unsigned int NodeNr) { + Nodes[NodeNr].computed = true; + for (unsigned int SuccNr : Nodes[NodeNr].Successors) { + Nodes[SuccNr]; + if (Nodes[SuccNr].computed) { + // Join don't call + Nodes[SuccNr].mustJoin(Nodes[NodeNr]); + Nodes[SuccNr].mustJoin(AbstractState(NodeNr)); + } else { + // Update and fill Succ + Nodes[SuccNr].fill(Nodes[NodeNr], NodeNr); + fillAbstractCache(SuccNr); + } + } + return; + } + + unsigned int collectHits() { + unsigned int Hits = 0; + for (auto const &E : Edges) { + auto predecessor = Nodes[E.first]; + for (unsigned int SuccessorAddr : E.second) { + // When successors Address is in predecessor, we have a Hit. + Hits += predecessor.isHit(Address(SuccessorAddr)) ? 1 : 0; + } + } + return Hits; + } + + unsigned int collectMisses() { + unsigned int Misses = 0; + for (auto const &E : Edges) { + auto predecessor = Nodes[E.first]; + for (unsigned int SuccessorAddr : E.second) { + // When successors Address is in predecessor, we have a Hit. + Misses += predecessor.isHit(Address(SuccessorAddr)) ? 0 : 1; + } + } + return Misses; + } + + void dumpEdges() { + llvm::outs() << "Dumping Edges:\n"; + for (auto const &E : Edges) { + llvm::outs() << E.first; + bool FirstPrint = true; + for (unsigned int To : E.second) { + if (FirstPrint) { + llvm::outs() << " -> " << To; + FirstPrint = false; + } else { + llvm::outs() << ", " << To; + } + } + llvm::outs() << "\n"; + } + } + + void dumpDotFile() { + std::ofstream DotFile; + DotFile.open("out.dot"); + DotFile << "digraph g {" + << "\n"; + for (auto const &E : Edges) { + for (unsigned int To : E.second) { + DotFile << E.first << " -> " << To << "\n"; + } + } + DotFile << "}\n"; + DotFile.close(); + } + + void dumpNodes() { + for (auto const &E : Edges) { + Nodes[E.first].dump(); + } + } +}; // namespace +#endif // ABSTRACHTCACHESTATE_H \ No newline at end of file diff --git a/include/AbstractState.h b/include/AbstractState.h new file mode 100644 index 0000000..7883904 --- /dev/null +++ b/include/AbstractState.h @@ -0,0 +1,192 @@ +#ifndef ABSSTATE_H +#define ABSSTATE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "Address.h" +// Forward declarations + +namespace cacheAnaPass { +class AbstractState; + +} // namespace cacheAnaPass + +class AbstractState { +public: // everything is public, because IDGAF + std::list Successors; + std::list Predecessors; + + unsigned int Addr; + + bool computed = false; + + // Only entries below this comment are needed for the exercise. + + /** + * @brief Containing all Abstract Cache Tags. + * Key of the list has no Meaning. + * + */ + struct Entry { + std::list Blocks; + }; + + /** + * @brief Cache Set, Key is the Age of the Entries. + * + */ + struct Set { + // uInt in this map is the Age. + std::map Associativity; + }; + + /** + * @brief Cache Sets, key is the Set number [0-15], derived from Address. 
+ * + */ + std::map Sets; + + AbstractState(AbstractState const &Copy) { + for (auto S : Copy.Sets) { + unsigned int SetNr = S.first; + for (auto E : S.second.Associativity) { + unsigned int Age = E.first; + for (auto B : E.second.Blocks) { + Sets[SetNr].Associativity[Age].Blocks.push_back(B); + } + } + } + } + + AbstractState() {} + + AbstractState(unsigned int AddressIn) { Addr = AddressIn; } + + AbstractState(Address Addr) { Sets[Addr.Index].Associativity[0] = {{Addr.Tag}}; } + + + + /** + * @brief Executes an Must LRU Join on the AbstractCacheState + * + * @param In, AbstractState that gets joined into the State. + */ + void mustJoin(AbstractState In) { + /** + * The exercise is to Fill this function with an LRU must Join. + * For this you need to use Sets. Associativity and Blocks. + */ + } + + /** + * @brief Checks if Address Addr is in Cache + * + * @param Addr Address to check. + * @return true CacheState contains Address Addr + * @return false CacheState does not contain Address Addr + */ + bool isHit(Address Addr) { + for (auto E : Sets[Addr.Index].Associativity) { + for (auto B : E.second.Blocks) { + if (B == Addr.Tag) + return true; + } + } + return false; + } + + /** + * @brief Updates the AbstractState with given Address + * + * @param Addr , Address + */ + void update(Address Addr) { + for (int i = 3; i > 0; i--) { + Sets[Addr.Index].Associativity[i] = Sets[Addr.Index].Associativity[i - 1]; + } + Sets[Addr.Index].Associativity[0].Blocks = {Addr.Tag}; + } + + /** + * @brief Updates the AbstractState with given AbstractState + * + * @param UpdateState, State that gets merged into State with Age+1. + */ + void update(AbstractState UpdateState) { + for (auto S : UpdateState.Sets) { + unsigned int Index = S.first; + for (auto E : S.second.Associativity) { + unsigned int Age = E.first + 1; + // If updated age is greater 4 The Tag is no longer in Cache. + // Due to associativity of 4 per set. + if (Age >= 4) + break; + for (auto B : E.second.Blocks) { + Sets[Index].Associativity[Age].Blocks.push_back(B); + } + } + } + } + + /** + * @brief Fills the AbstractState PreState and PreAddress. + * + * @param PreState, State that fills this state. + * + * @param PreAddr Address of PreState + */ + void fill(AbstractState PreState, Address PreAddr) { + for (auto S : PreState.Sets) { + unsigned int Index = S.first; + for (auto E : S.second.Associativity) { + unsigned int Age = E.first + 1; + // If updated age is greater 4 The Tag is no longer in Cache. + // Due to associativity of 4 per set. 
+ if (Age >= 4) + break; + for (auto B : E.second.Blocks) { + Sets[Index].Associativity[Age].Blocks.push_back(B); + } + } + } + Sets[PreAddr.Index].Associativity[0].Blocks.push_back(PreAddr.Tag); + } + + void dump() { + llvm::outs() << Addr << " {\n"; + llvm::outs() << "Predecessors: "; + for (auto PreNr : Predecessors) { + llvm::outs() << PreNr << " "; + } + llvm::outs() << "\n"; + + llvm::outs() << "Successors: "; + for (auto SuccNr : Successors) { + llvm::outs() << SuccNr << " "; + } + llvm::outs() << "\n"; + + for (auto SetPair : Sets) { + llvm::outs() << "Set[" << SetPair.first << "]: \n"; + for (auto EntryPair : SetPair.second.Associativity) { + llvm::outs() << " Age[" << EntryPair.first << "]: "; + for (auto Block : EntryPair.second.Blocks) { + llvm::outs() << Block << " "; + } + llvm::outs() << "\n"; + } + } + llvm::outs() << "}\n"; + } + +}; // namespace +#endif // STATE_H \ No newline at end of file diff --git a/include/Address.h b/include/Address.h new file mode 100644 index 0000000..8f2f6e2 --- /dev/null +++ b/include/Address.h @@ -0,0 +1,30 @@ +#ifndef ADDRESS_H +#define ADDRESS_H + +// Forward declarations +#include +namespace cacheAnaPass { +class Address; + +} // namespace cacheAnaPass + +class Address { +public: // everything is public, because IDGAF + unsigned int Addr; + unsigned int Offset; + unsigned int Index; + unsigned int Tag; + + // Object holding Tag, Index and Offset for a 16Sets, 4Assoc ,2CL Cache + Address(unsigned int Addr) { + this->Addr = Addr; + // Ignoring Offset + this->Offset = Addr & 0b1; + assert(Offset <= 1); + this->Index = (Addr & 0b11110) >> 1; + assert(Index <= 15); + this->Tag = Addr >> 5; + } + +}; // namespace +#endif // STATE_H \ No newline at end of file diff --git a/include/CacheType.h b/include/CacheType.h new file mode 100644 index 0000000..e2dab1d --- /dev/null +++ b/include/CacheType.h @@ -0,0 +1,44 @@ +#ifndef CACHETYPE_H +#define CACHETYPE_H +#include + +// Forward declarations + +namespace cacheAnaPass { +class CacheType; + +} // namespace cacheAnaPass + +/** + * @brief Class to Check if Set Associate Cache is valid. + * + */ +class CacheType { +public: + bool isPower2(int n) { return n && !(n & (n - 1)); } + + int Sets; // n Sets + int Associativity; // m Associativity + int LineSize; // In Bits + int CacheSize; // In Bits, n*m*Linesize + + /** + * @brief Construct a new Cache Type object + * + * @param Sets + * @param Associativity + * @param Linesize + */ + CacheType(unsigned int Sets, unsigned int Associativity, + unsigned int Linesize) { + assert(isPower2(Sets)); + assert(isPower2(Associativity)); + assert(isPower2(Linesize)); + this->Sets = Sets; + this->Associativity = Associativity; + this->LineSize = Linesize; + this->CacheSize = Sets * Associativity * Linesize; + } + CacheType(); +}; +#endif // CACHETYPE_H \ No newline at end of file diff --git a/include/ConcreteState.h b/include/ConcreteState.h new file mode 100644 index 0000000..24958e1 --- /dev/null +++ b/include/ConcreteState.h @@ -0,0 +1,41 @@ +#ifndef CONCRETESTATE_H +#define CONCRETESTATE_H + +//Currently Unused. 
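// In contrast to AbstractState, which keeps a set of possibly cached blocks
// per set and age, a ConcreteState would represent one exact cache content;
// the analysis does not instantiate it at the moment.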
+ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "Address.h" +// Forward declarations + +namespace cacheAnaPass { +class ConcreteState; + +} // namespace cacheAnaPass + +class ConcreteState { +public: // everything is public, because IDGAF + struct Entry { + unsigned int Block; + unsigned int Age; + }; + + struct Set { + std::map Entries; + }; + + std::map Sets; + + ConcreteState(Address Addr) { + Sets[Addr.Index].Entries[0]= Addr.Tag; + } + +}; // namespace +#endif // CONCRETESTATE_H \ No newline at end of file diff --git a/test/adpcm.ll b/test/adpcm.ll new file mode 100644 index 0000000..44c1f2c --- /dev/null +++ b/test/adpcm.ll @@ -0,0 +1,1584 @@ +; ModuleID = 'adpcm.c' +source_filename = "adpcm.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +@h = dso_local local_unnamed_addr global [24 x i32] [i32 12, i32 -44, i32 -44, i32 212, i32 48, i32 -624, i32 128, i32 1448, i32 -840, i32 -3220, i32 3804, i32 15504, i32 15504, i32 3804, i32 -3220, i32 -840, i32 1448, i32 128, i32 -624, i32 48, i32 212, i32 -44, i32 -44, i32 12], align 16 +@qq4_code4_table = dso_local local_unnamed_addr global [16 x i32] [i32 0, i32 -20456, i32 -12896, i32 -8968, i32 -6288, i32 -4240, i32 -2584, i32 -1200, i32 20456, i32 12896, i32 8968, i32 6288, i32 4240, i32 2584, i32 1200, i32 0], align 16 +@qq5_code5_table = dso_local local_unnamed_addr global [32 x i32] [i32 -280, i32 -280, i32 -23352, i32 -17560, i32 -14120, i32 -11664, i32 -9752, i32 -8184, i32 -6864, i32 -5712, i32 -4696, i32 -3784, i32 -2960, i32 -2208, i32 -1520, i32 -880, i32 23352, i32 17560, i32 14120, i32 11664, i32 9752, i32 8184, i32 6864, i32 5712, i32 4696, i32 3784, i32 2960, i32 2208, i32 1520, i32 880, i32 280, i32 -280], align 16 +@qq6_code6_table = dso_local local_unnamed_addr global [64 x i32] [i32 -136, i32 -136, i32 -136, i32 -136, i32 -24808, i32 -21904, i32 -19008, i32 -16704, i32 -14984, i32 -13512, i32 -12280, i32 -11192, i32 -10232, i32 -9360, i32 -8576, i32 -7856, i32 -7192, i32 -6576, i32 -6000, i32 -5456, i32 -4944, i32 -4464, i32 -4008, i32 -3576, i32 -3168, i32 -2776, i32 -2400, i32 -2032, i32 -1688, i32 -1360, i32 -1040, i32 -728, i32 24808, i32 21904, i32 19008, i32 16704, i32 14984, i32 13512, i32 12280, i32 11192, i32 10232, i32 9360, i32 8576, i32 7856, i32 7192, i32 6576, i32 6000, i32 5456, i32 4944, i32 4464, i32 4008, i32 3576, i32 3168, i32 2776, i32 2400, i32 2032, i32 1688, i32 1360, i32 1040, i32 728, i32 432, i32 136, i32 -432, i32 -136], align 16 +@wl_code_table = dso_local local_unnamed_addr global [16 x i32] [i32 -60, i32 3042, i32 1198, i32 538, i32 334, i32 172, i32 58, i32 -30, i32 3042, i32 1198, i32 538, i32 334, i32 172, i32 58, i32 -30, i32 -60], align 16 +@wl_table = dso_local local_unnamed_addr global [8 x i32] [i32 -60, i32 -30, i32 58, i32 172, i32 334, i32 538, i32 1198, i32 3042], align 16 +@ilb_table = dso_local local_unnamed_addr global [32 x i32] [i32 2048, i32 2093, i32 2139, i32 2186, i32 2233, i32 2282, i32 2332, i32 2383, i32 2435, i32 2489, i32 2543, i32 2599, i32 2656, i32 2714, i32 2774, i32 2834, i32 2896, i32 2960, i32 3025, i32 3091, i32 3158, i32 3228, i32 3298, i32 3371, i32 3444, i32 3520, i32 3597, i32 3676, i32 3756, i32 3838, i32 3922, i32 4008], align 16 +@decis_levl = dso_local local_unnamed_addr global [30 x i32] [i32 280, i32 576, i32 880, i32 1200, i32 1520, i32 1864, i32 2208, i32 2584, i32 2960, i32 3376, i32 3784, i32 4240, i32 4696, i32 
5200, i32 5712, i32 6288, i32 6864, i32 7520, i32 8184, i32 8968, i32 9752, i32 10712, i32 11664, i32 12896, i32 14120, i32 15840, i32 17560, i32 20456, i32 23352, i32 32767], align 16 +@quant26bt_pos = dso_local local_unnamed_addr global [31 x i32] [i32 61, i32 60, i32 59, i32 58, i32 57, i32 56, i32 55, i32 54, i32 53, i32 52, i32 51, i32 50, i32 49, i32 48, i32 47, i32 46, i32 45, i32 44, i32 43, i32 42, i32 41, i32 40, i32 39, i32 38, i32 37, i32 36, i32 35, i32 34, i32 33, i32 32, i32 32], align 16 +@quant26bt_neg = dso_local local_unnamed_addr global [31 x i32] [i32 63, i32 62, i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 4], align 16 +@qq2_code2_table = dso_local local_unnamed_addr global [4 x i32] [i32 -7408, i32 -1616, i32 7408, i32 1616], align 16 +@wh_code_table = dso_local local_unnamed_addr global [4 x i32] [i32 798, i32 -214, i32 798, i32 -214], align 16 +@tqmf = dso_local local_unnamed_addr global [24 x i32] zeroinitializer, align 16 +@xl = dso_local local_unnamed_addr global i32 0, align 4 +@xh = dso_local local_unnamed_addr global i32 0, align 4 +@delay_bpl = dso_local local_unnamed_addr global [6 x i32] zeroinitializer, align 16 +@delay_dltx = dso_local local_unnamed_addr global [6 x i32] zeroinitializer, align 16 +@szl = dso_local local_unnamed_addr global i32 0, align 4 +@rlt1 = dso_local local_unnamed_addr global i32 0, align 4 +@al1 = dso_local local_unnamed_addr global i32 0, align 4 +@rlt2 = dso_local local_unnamed_addr global i32 0, align 4 +@al2 = dso_local local_unnamed_addr global i32 0, align 4 +@spl = dso_local local_unnamed_addr global i32 0, align 4 +@sl = dso_local local_unnamed_addr global i32 0, align 4 +@el = dso_local local_unnamed_addr global i32 0, align 4 +@detl = dso_local local_unnamed_addr global i32 0, align 4 +@il = dso_local local_unnamed_addr global i32 0, align 4 +@dlt = dso_local local_unnamed_addr global i32 0, align 4 +@nbl = dso_local local_unnamed_addr global i32 0, align 4 +@plt = dso_local local_unnamed_addr global i32 0, align 4 +@plt1 = dso_local local_unnamed_addr global i32 0, align 4 +@plt2 = dso_local local_unnamed_addr global i32 0, align 4 +@rlt = dso_local local_unnamed_addr global i32 0, align 4 +@delay_bph = dso_local local_unnamed_addr global [6 x i32] zeroinitializer, align 16 +@delay_dhx = dso_local local_unnamed_addr global [6 x i32] zeroinitializer, align 16 +@szh = dso_local local_unnamed_addr global i32 0, align 4 +@rh1 = dso_local local_unnamed_addr global i32 0, align 4 +@ah1 = dso_local local_unnamed_addr global i32 0, align 4 +@rh2 = dso_local local_unnamed_addr global i32 0, align 4 +@ah2 = dso_local local_unnamed_addr global i32 0, align 4 +@sph = dso_local local_unnamed_addr global i32 0, align 4 +@sh = dso_local local_unnamed_addr global i32 0, align 4 +@eh = dso_local local_unnamed_addr global i32 0, align 4 +@ih = dso_local local_unnamed_addr global i32 0, align 4 +@deth = dso_local local_unnamed_addr global i32 0, align 4 +@dh = dso_local local_unnamed_addr global i32 0, align 4 +@nbh = dso_local local_unnamed_addr global i32 0, align 4 +@ph = dso_local local_unnamed_addr global i32 0, align 4 +@ph1 = dso_local local_unnamed_addr global i32 0, align 4 +@ph2 = dso_local local_unnamed_addr global i32 0, align 4 +@yh = dso_local local_unnamed_addr global i32 0, align 4 +@ilr = dso_local local_unnamed_addr global i32 0, align 4 
+@dec_del_bpl = dso_local local_unnamed_addr global [6 x i32] zeroinitializer, align 16 +@dec_del_dltx = dso_local local_unnamed_addr global [6 x i32] zeroinitializer, align 16 +@dec_szl = dso_local local_unnamed_addr global i32 0, align 4 +@dec_rlt1 = dso_local local_unnamed_addr global i32 0, align 4 +@dec_al1 = dso_local local_unnamed_addr global i32 0, align 4 +@dec_rlt2 = dso_local local_unnamed_addr global i32 0, align 4 +@dec_al2 = dso_local local_unnamed_addr global i32 0, align 4 +@dec_spl = dso_local local_unnamed_addr global i32 0, align 4 +@dec_sl = dso_local local_unnamed_addr global i32 0, align 4 +@dec_detl = dso_local local_unnamed_addr global i32 0, align 4 +@dec_dlt = dso_local local_unnamed_addr global i32 0, align 4 +@dl = dso_local local_unnamed_addr global i32 0, align 4 +@rl = dso_local local_unnamed_addr global i32 0, align 4 +@dec_nbl = dso_local local_unnamed_addr global i32 0, align 4 +@dec_plt = dso_local local_unnamed_addr global i32 0, align 4 +@dec_plt1 = dso_local local_unnamed_addr global i32 0, align 4 +@dec_plt2 = dso_local local_unnamed_addr global i32 0, align 4 +@dec_rlt = dso_local local_unnamed_addr global i32 0, align 4 +@dec_del_bph = dso_local local_unnamed_addr global [6 x i32] zeroinitializer, align 16 +@dec_del_dhx = dso_local local_unnamed_addr global [6 x i32] zeroinitializer, align 16 +@dec_szh = dso_local local_unnamed_addr global i32 0, align 4 +@dec_rh1 = dso_local local_unnamed_addr global i32 0, align 4 +@dec_ah1 = dso_local local_unnamed_addr global i32 0, align 4 +@dec_rh2 = dso_local local_unnamed_addr global i32 0, align 4 +@dec_ah2 = dso_local local_unnamed_addr global i32 0, align 4 +@dec_sph = dso_local local_unnamed_addr global i32 0, align 4 +@dec_sh = dso_local local_unnamed_addr global i32 0, align 4 +@dec_deth = dso_local local_unnamed_addr global i32 0, align 4 +@dec_dh = dso_local local_unnamed_addr global i32 0, align 4 +@dec_nbh = dso_local local_unnamed_addr global i32 0, align 4 +@dec_ph = dso_local local_unnamed_addr global i32 0, align 4 +@dec_ph1 = dso_local local_unnamed_addr global i32 0, align 4 +@dec_ph2 = dso_local local_unnamed_addr global i32 0, align 4 +@rh = dso_local local_unnamed_addr global i32 0, align 4 +@xd = dso_local local_unnamed_addr global i32 0, align 4 +@xs = dso_local local_unnamed_addr global i32 0, align 4 +@accumc = dso_local local_unnamed_addr global [11 x i32] zeroinitializer, align 16 +@accumd = dso_local local_unnamed_addr global [11 x i32] zeroinitializer, align 16 +@xout1 = dso_local local_unnamed_addr global i32 0, align 4 +@xout2 = dso_local local_unnamed_addr global i32 0, align 4 +@main.test_data = internal unnamed_addr global [6 x i32] zeroinitializer, align 16 +@main.compressed = internal unnamed_addr global [3 x i32] zeroinitializer, align 4 +@main.result = internal unnamed_addr global [6 x i32] zeroinitializer, align 16 +@rs = dso_local local_unnamed_addr global i32 0, align 4 +@yl = dso_local local_unnamed_addr global i32 0, align 4 +@dec_yh = dso_local local_unnamed_addr global i32 0, align 4 +@dec_rh = dso_local local_unnamed_addr global i32 0, align 4 + +; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone sspstrong uwtable willreturn +define dso_local i32 @my_abs(i32 %0) local_unnamed_addr #0 { + %2 = call i32 @llvm.abs.i32(i32 %0, i1 true) + ret i32 %2 +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone sspstrong uwtable willreturn +define dso_local i32 @my_fabs(i32 %0) local_unnamed_addr #0 { + %2 = call i32 
@llvm.abs.i32(i32 %0, i1 true) + ret i32 %2 +} + +; Function Attrs: nofree nosync nounwind readnone sspstrong uwtable +define dso_local i32 @my_sin(i32 %0) local_unnamed_addr #1 { + %2 = add i32 %0, 6281 + %3 = call i32 @llvm.smin.i32(i32 %0, i32 6282) + %4 = sub i32 %2, %3 + %5 = urem i32 %4, 6282 + %6 = sub i32 %4, %5 + %7 = sub i32 %0, %6 + %8 = call i32 @llvm.smax.i32(i32 %7, i32 -6282) + %9 = add i32 %8, %6 + %10 = sub i32 %9, %0 + %11 = icmp ne i32 %10, 0 + %12 = zext i1 %11 to i32 + %13 = sub i32 %10, %12 + %14 = udiv i32 %13, 6282 + %15 = add nuw nsw i32 %14, %12 + %16 = mul i32 %15, 6282 + %17 = add i32 %16, %0 + %18 = sub i32 %17, %6 + %19 = mul i32 %18, %18 + %20 = sub i32 0, %19 + %21 = mul nsw i32 %18, %20 + %22 = sdiv i32 %21, 6 + %23 = add nsw i32 %22, %18 + %24 = add i32 %21, 5 + %25 = icmp ult i32 %24, 11 + br i1 %25, label %38, label %26 + +26: ; preds = %1, %26 + %27 = phi i32 [ %36, %26 ], [ 2, %1 ] + %28 = phi i32 [ %35, %26 ], [ %23, %1 ] + %29 = phi i32 [ %34, %26 ], [ %22, %1 ] + %30 = mul nsw i32 %29, %20 + %31 = shl nuw nsw i32 %27, 1 + %32 = or i32 %31, 1 + %33 = mul nsw i32 %32, %31 + %34 = sdiv i32 %30, %33 + %35 = add nsw i32 %34, %28 + %36 = add nuw nsw i32 %27, 1 + %37 = icmp eq i32 %34, 0 + br i1 %37, label %38, label %26, !llvm.loop !5 + +38: ; preds = %26, %1 + %39 = phi i32 [ %23, %1 ], [ %35, %26 ] + ret i32 %39 +} + +; Function Attrs: nofree nosync nounwind readnone sspstrong uwtable +define dso_local i32 @my_cos(i32 %0) local_unnamed_addr #1 { + %2 = add i32 %0, -1570 + %3 = sub nsw i32 1570, %0 + %4 = call i32 @llvm.smin.i32(i32 %3, i32 6282) #11 + %5 = add i32 %4, %0 + %6 = sub i32 7851, %5 + %7 = urem i32 %6, 6282 + %8 = sub i32 %6, %7 + %9 = sub i32 %3, %8 + %10 = call i32 @llvm.smax.i32(i32 %9, i32 -6282) #11 + %11 = add i32 %8, %2 + %12 = add i32 %11, %10 + %13 = icmp ne i32 %12, 0 + %14 = zext i1 %13 to i32 + %15 = sub i32 %12, %14 + %16 = udiv i32 %15, 6282 + %17 = add nuw nsw i32 %16, %14 + %18 = mul i32 %17, 6282 + %19 = sub i32 %3, %8 + %20 = add i32 %19, %18 + %21 = mul i32 %20, %20 + %22 = sub i32 0, %21 + %23 = mul nsw i32 %20, %22 + %24 = sdiv i32 %23, 6 + %25 = add nsw i32 %24, %20 + %26 = add i32 %23, 5 + %27 = icmp ult i32 %26, 11 + br i1 %27, label %40, label %28 + +28: ; preds = %1, %28 + %29 = phi i32 [ %38, %28 ], [ 2, %1 ] + %30 = phi i32 [ %37, %28 ], [ %25, %1 ] + %31 = phi i32 [ %36, %28 ], [ %24, %1 ] + %32 = mul nsw i32 %31, %22 + %33 = shl nuw nsw i32 %29, 1 + %34 = or i32 %33, 1 + %35 = mul nsw i32 %34, %33 + %36 = sdiv i32 %32, %35 + %37 = add nsw i32 %36, %30 + %38 = add nuw nsw i32 %29, 1 + %39 = icmp eq i32 %36, 0 + br i1 %39, label %40, label %28, !llvm.loop !5 + +40: ; preds = %28, %1 + %41 = phi i32 [ %25, %1 ], [ %37, %28 ] + ret i32 %41 +} + +; Function Attrs: nofree nosync nounwind sspstrong uwtable +define dso_local i32 @encode(i32 %0, i32 %1) local_unnamed_addr #2 { + %3 = load i32, i32* getelementptr inbounds ([24 x i32], [24 x i32]* @tqmf, i64 0, i64 0), align 16, !tbaa !8 + %4 = sext i32 %3 to i64 + %5 = load i32, i32* getelementptr inbounds ([24 x i32], [24 x i32]* @h, i64 0, i64 0), align 16, !tbaa !8 + %6 = sext i32 %5 to i64 + %7 = mul nsw i64 %6, %4 + %8 = load i32, i32* getelementptr inbounds ([24 x i32], [24 x i32]* @tqmf, i64 0, i64 1), align 4, !tbaa !8 + %9 = sext i32 %8 to i64 + %10 = load i32, i32* getelementptr inbounds ([24 x i32], [24 x i32]* @h, i64 0, i64 1), align 4, !tbaa !8 + %11 = sext i32 %10 to i64 + %12 = mul nsw i64 %11, %9 + br label %13 + +13: ; preds = %2, %13 + %14 = phi i32 [ 
0, %2 ], [ %35, %13 ] + %15 = phi i64 [ %12, %2 ], [ %34, %13 ] + %16 = phi i64 [ %7, %2 ], [ %26, %13 ] + %17 = phi i32* [ getelementptr inbounds ([24 x i32], [24 x i32]* @tqmf, i64 0, i64 2), %2 ], [ %27, %13 ] + %18 = phi i32* [ getelementptr inbounds ([24 x i32], [24 x i32]* @h, i64 0, i64 2), %2 ], [ %30, %13 ] + %19 = getelementptr inbounds i32, i32* %17, i64 1 + %20 = load i32, i32* %17, align 4, !tbaa !8 + %21 = sext i32 %20 to i64 + %22 = getelementptr inbounds i32, i32* %18, i64 1 + %23 = load i32, i32* %18, align 4, !tbaa !8 + %24 = sext i32 %23 to i64 + %25 = mul nsw i64 %24, %21 + %26 = add nsw i64 %25, %16 + %27 = getelementptr inbounds i32, i32* %17, i64 2 + %28 = load i32, i32* %19, align 4, !tbaa !8 + %29 = sext i32 %28 to i64 + %30 = getelementptr inbounds i32, i32* %18, i64 2 + %31 = load i32, i32* %22, align 4, !tbaa !8 + %32 = sext i32 %31 to i64 + %33 = mul nsw i64 %32, %29 + %34 = add nsw i64 %33, %15 + %35 = add nuw nsw i32 %14, 1 + %36 = icmp eq i32 %35, 10 + br i1 %36, label %37, label %13, !llvm.loop !12 + +37: ; preds = %13 + %38 = load i32, i32* getelementptr inbounds ([24 x i32], [24 x i32]* @tqmf, i64 0, i64 22), align 8, !tbaa !8 + %39 = load i32, i32* getelementptr inbounds ([24 x i32], [24 x i32]* @h, i64 0, i64 22), align 8, !tbaa !8 + %40 = load i32, i32* getelementptr inbounds ([24 x i32], [24 x i32]* @tqmf, i64 0, i64 23), align 4, !tbaa !8 + %41 = load i32, i32* getelementptr inbounds ([24 x i32], [24 x i32]* @h, i64 0, i64 23), align 4, !tbaa !8 + call void @llvm.memmove.p0i8.p0i8.i64(i8* noundef nonnull align 8 dereferenceable(88) bitcast (i32* getelementptr inbounds ([24 x i32], [24 x i32]* @tqmf, i64 0, i64 2) to i8*), i8* noundef nonnull align 16 dereferenceable(88) bitcast ([24 x i32]* @tqmf to i8*), i64 88, i1 false) + %42 = sext i32 %41 to i64 + %43 = sext i32 %40 to i64 + %44 = mul nsw i64 %42, %43 + %45 = sext i32 %39 to i64 + %46 = sext i32 %38 to i64 + %47 = mul nsw i64 %45, %46 + %48 = add nsw i64 %47, %26 + %49 = add nsw i64 %44, %34 + store i32 %0, i32* getelementptr inbounds ([24 x i32], [24 x i32]* @tqmf, i64 0, i64 1), align 4, !tbaa !8 + store i32 %1, i32* getelementptr inbounds ([24 x i32], [24 x i32]* @tqmf, i64 0, i64 0), align 16, !tbaa !8 + %50 = add nsw i64 %49, %48 + %51 = lshr i64 %50, 15 + %52 = trunc i64 %51 to i32 + store i32 %52, i32* @xl, align 4, !tbaa !8 + %53 = sub nsw i64 %48, %49 + %54 = lshr i64 %53, 15 + %55 = trunc i64 %54 to i32 + store i32 %55, i32* @xh, align 4, !tbaa !8 + %56 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @delay_bpl, i64 0, i64 0), align 16, !tbaa !8 + %57 = sext i32 %56 to i64 + %58 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @delay_dltx, i64 0, i64 0), align 16, !tbaa !8 + %59 = sext i32 %58 to i64 + %60 = mul nsw i64 %59, %57 + br label %61 + +61: ; preds = %61, %37 + %62 = phi i32* [ getelementptr inbounds ([6 x i32], [6 x i32]* @delay_bpl, i64 0, i64 0), %37 ], [ %67, %61 ] + %63 = phi i32* [ getelementptr inbounds ([6 x i32], [6 x i32]* @delay_dltx, i64 0, i64 0), %37 ], [ %66, %61 ] + %64 = phi i64 [ %60, %37 ], [ %73, %61 ] + %65 = phi i32 [ 1, %37 ], [ %74, %61 ] + %66 = getelementptr inbounds i32, i32* %63, i64 1 + %67 = getelementptr inbounds i32, i32* %62, i64 1 + %68 = load i32, i32* %67, align 4, !tbaa !8 + %69 = sext i32 %68 to i64 + %70 = load i32, i32* %66, align 4, !tbaa !8 + %71 = sext i32 %70 to i64 + %72 = mul nsw i64 %71, %69 + %73 = add nsw i64 %72, %64 + %74 = add nuw nsw i32 %65, 1 + %75 = icmp eq i32 %74, 6 + br i1 %75, label %76, 
label %61, !llvm.loop !13 + +76: ; preds = %61 + %77 = lshr i64 %73, 14 + %78 = trunc i64 %77 to i32 + store i32 %78, i32* @szl, align 4, !tbaa !8 + %79 = load i32, i32* @rlt1, align 4, !tbaa !8 + %80 = load i32, i32* @al1, align 4, !tbaa !8 + %81 = load i32, i32* @rlt2, align 4, !tbaa !8 + %82 = load i32, i32* @al2, align 4, !tbaa !8 + %83 = shl nsw i32 %79, 1 + %84 = sext i32 %83 to i64 + %85 = sext i32 %80 to i64 + %86 = mul nsw i64 %85, %84 + %87 = shl nsw i32 %81, 1 + %88 = sext i32 %87 to i64 + %89 = sext i32 %82 to i64 + %90 = mul nsw i64 %89, %88 + %91 = add nsw i64 %90, %86 + %92 = lshr i64 %91, 15 + %93 = trunc i64 %92 to i32 + store i32 %93, i32* @spl, align 4, !tbaa !8 + %94 = add nsw i32 %93, %78 + store i32 %94, i32* @sl, align 4, !tbaa !8 + %95 = sub nsw i32 %52, %94 + store i32 %95, i32* @el, align 4, !tbaa !8 + %96 = load i32, i32* @detl, align 4, !tbaa !8 + %97 = call i32 @llvm.abs.i32(i32 %95, i1 true) #11 + %98 = zext i32 %97 to i64 + %99 = sext i32 %96 to i64 + br label %100 + +100: ; preds = %108, %76 + %101 = phi i64 [ 0, %76 ], [ %109, %108 ] + %102 = getelementptr inbounds [30 x i32], [30 x i32]* @decis_levl, i64 0, i64 %101 + %103 = load i32, i32* %102, align 4, !tbaa !8 + %104 = sext i32 %103 to i64 + %105 = mul nsw i64 %104, %99 + %106 = ashr i64 %105, 15 + %107 = icmp slt i64 %106, %98 + br i1 %107, label %108, label %111 + +108: ; preds = %100 + %109 = add nuw nsw i64 %101, 1 + %110 = icmp eq i64 %109, 30 + br i1 %110, label %111, label %100, !llvm.loop !14 + +111: ; preds = %100, %108 + %112 = phi i64 [ %101, %100 ], [ 30, %108 ] + %113 = icmp sgt i32 %95, -1 + %114 = and i64 %112, 4294967295 + %115 = getelementptr inbounds [31 x i32], [31 x i32]* @quant26bt_pos, i64 0, i64 %114 + %116 = getelementptr inbounds [31 x i32], [31 x i32]* @quant26bt_neg, i64 0, i64 %114 + %117 = select i1 %113, i32* %115, i32* %116 + %118 = load i32, i32* %117, align 4, !tbaa !8 + store i32 %118, i32* @il, align 4, !tbaa !8 + %119 = ashr i32 %118, 2 + %120 = sext i32 %119 to i64 + %121 = getelementptr inbounds [16 x i32], [16 x i32]* @qq4_code4_table, i64 0, i64 %120 + %122 = load i32, i32* %121, align 4, !tbaa !8 + %123 = sext i32 %122 to i64 + %124 = mul nsw i64 %123, %99 + %125 = lshr i64 %124, 15 + %126 = trunc i64 %125 to i32 + store i32 %126, i32* @dlt, align 4, !tbaa !8 + %127 = load i32, i32* @nbl, align 4, !tbaa !8 + %128 = sext i32 %127 to i64 + %129 = mul nsw i64 %128, 127 + %130 = lshr i64 %129, 7 + %131 = trunc i64 %130 to i32 + %132 = getelementptr inbounds [16 x i32], [16 x i32]* @wl_code_table, i64 0, i64 %120 + %133 = load i32, i32* %132, align 4, !tbaa !8 + %134 = add nsw i32 %133, %131 + %135 = icmp sgt i32 %134, 0 + %136 = select i1 %135, i32 %134, i32 0 + %137 = icmp slt i32 %136, 18432 + %138 = select i1 %137, i32 %136, i32 18432 + store i32 %138, i32* @nbl, align 4, !tbaa !8 + %139 = lshr i32 %138, 6 + %140 = and i32 %139, 31 + %141 = lshr i32 %138, 11 + %142 = zext i32 %140 to i64 + %143 = getelementptr inbounds [32 x i32], [32 x i32]* @ilb_table, i64 0, i64 %142 + %144 = load i32, i32* %143, align 4, !tbaa !8 + %145 = sub nsw i32 9, %141 + %146 = ashr i32 %144, %145 + %147 = shl i32 %146, 3 + store i32 %147, i32* @detl, align 4, !tbaa !8 + %148 = add nsw i32 %126, %78 + store i32 %148, i32* @plt, align 4, !tbaa !8 + %149 = icmp eq i32 %126, 0 + br i1 %149, label %153, label %150 + +150: ; preds = %111 + %151 = shl i64 %125, 32 + %152 = ashr exact i64 %151, 32 + br label %163 + +153: ; preds = %111, %153 + %154 = phi i64 [ %161, %153 ], [ 0, %111 ] + %155 = 
getelementptr inbounds [6 x i32], [6 x i32]* @delay_bpl, i64 0, i64 %154 + %156 = load i32, i32* %155, align 4, !tbaa !8 + %157 = sext i32 %156 to i64 + %158 = mul nsw i64 %157, 255 + %159 = lshr i64 %158, 8 + %160 = trunc i64 %159 to i32 + store i32 %160, i32* %155, align 4, !tbaa !8 + %161 = add nuw nsw i64 %154, 1 + %162 = icmp eq i64 %161, 6 + br i1 %162, label %180, label %153, !llvm.loop !15 + +163: ; preds = %163, %150 + %164 = phi i64 [ 0, %150 ], [ %178, %163 ] + %165 = getelementptr inbounds [6 x i32], [6 x i32]* @delay_dltx, i64 0, i64 %164 + %166 = load i32, i32* %165, align 4, !tbaa !8 + %167 = sext i32 %166 to i64 + %168 = mul nsw i64 %152, %167 + %169 = icmp sgt i64 %168, -1 + %170 = select i1 %169, i32 128, i32 -128 + %171 = getelementptr inbounds [6 x i32], [6 x i32]* @delay_bpl, i64 0, i64 %164 + %172 = load i32, i32* %171, align 4, !tbaa !8 + %173 = sext i32 %172 to i64 + %174 = mul nsw i64 %173, 255 + %175 = lshr i64 %174, 8 + %176 = trunc i64 %175 to i32 + %177 = add nsw i32 %170, %176 + store i32 %177, i32* %171, align 4, !tbaa !8 + %178 = add nuw nsw i64 %164, 1 + %179 = icmp eq i64 %178, 6 + br i1 %179, label %180, label %163, !llvm.loop !16 + +180: ; preds = %163, %153 + %181 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @delay_dltx, i64 0, i64 4), align 16, !tbaa !8 + store i32 %181, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @delay_dltx, i64 0, i64 5), align 4, !tbaa !8 + %182 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @delay_dltx, i64 0, i64 3), align 4, !tbaa !8 + store i32 %182, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @delay_dltx, i64 0, i64 4), align 16, !tbaa !8 + %183 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @delay_dltx, i64 0, i64 2), align 8, !tbaa !8 + store i32 %183, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @delay_dltx, i64 0, i64 3), align 4, !tbaa !8 + store i32 %58, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @delay_dltx, i64 0, i64 1), align 4, !tbaa !8 + store i32 %126, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @delay_dltx, i64 0, i64 0), align 16, !tbaa !8 + %184 = load i32, i32* @plt1, align 4, !tbaa !8 + %185 = load i32, i32* @plt2, align 4, !tbaa !8 + %186 = shl nsw i64 %85, 2 + %187 = sext i32 %148 to i64 + %188 = sext i32 %184 to i64 + %189 = mul nsw i64 %188, %187 + %190 = icmp sgt i64 %189, -1 + %191 = sub nsw i64 0, %186 + %192 = select i1 %190, i64 %191, i64 %186 + %193 = lshr i64 %192, 7 + %194 = sext i32 %185 to i64 + %195 = mul nsw i64 %194, %187 + %196 = icmp sgt i64 %195, -1 + %197 = select i1 %196, i64 128, i64 4294967168 + %198 = mul nsw i64 %89, 127 + %199 = lshr i64 %198, 7 + %200 = add nuw nsw i64 %197, %199 + %201 = add nuw nsw i64 %200, %193 + %202 = trunc i64 %201 to i32 + %203 = icmp slt i32 %202, 12288 + %204 = select i1 %203, i32 %202, i32 12288 + %205 = icmp sgt i32 %204, -12288 + %206 = select i1 %205, i32 %204, i32 -12288 + store i32 %206, i32* @al2, align 4, !tbaa !8 + %207 = mul nsw i64 %85, 255 + %208 = lshr i64 %207, 8 + %209 = trunc i64 %208 to i32 + %210 = select i1 %190, i32 192, i32 -192 + %211 = add nsw i32 %210, %209 + %212 = add nsw i32 %206, -15360 + %213 = sub nsw i32 15360, %206 + %214 = icmp sgt i32 %211, %213 + %215 = select i1 %214, i32 %213, i32 %211 + %216 = icmp slt i32 %215, %212 + %217 = select i1 %216, i32 %212, i32 %215 + store i32 %217, i32* @al1, align 4, !tbaa !8 + %218 = add nsw i32 %94, %126 + store i32 %218, i32* @rlt, align 4, !tbaa !8 + store i32 %79, i32* @rlt2, align 4, !tbaa !8 + 
store i32 %218, i32* @rlt1, align 4, !tbaa !8 + store i32 %184, i32* @plt2, align 4, !tbaa !8 + store i32 %148, i32* @plt1, align 4, !tbaa !8 + %219 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @delay_bph, i64 0, i64 0), align 16, !tbaa !8 + %220 = sext i32 %219 to i64 + %221 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @delay_dhx, i64 0, i64 0), align 16, !tbaa !8 + %222 = sext i32 %221 to i64 + %223 = mul nsw i64 %222, %220 + br label %224 + +224: ; preds = %224, %180 + %225 = phi i32* [ getelementptr inbounds ([6 x i32], [6 x i32]* @delay_bph, i64 0, i64 0), %180 ], [ %230, %224 ] + %226 = phi i32* [ getelementptr inbounds ([6 x i32], [6 x i32]* @delay_dhx, i64 0, i64 0), %180 ], [ %229, %224 ] + %227 = phi i64 [ %223, %180 ], [ %236, %224 ] + %228 = phi i32 [ 1, %180 ], [ %237, %224 ] + %229 = getelementptr inbounds i32, i32* %226, i64 1 + %230 = getelementptr inbounds i32, i32* %225, i64 1 + %231 = load i32, i32* %230, align 4, !tbaa !8 + %232 = sext i32 %231 to i64 + %233 = load i32, i32* %229, align 4, !tbaa !8 + %234 = sext i32 %233 to i64 + %235 = mul nsw i64 %234, %232 + %236 = add nsw i64 %235, %227 + %237 = add nuw nsw i32 %228, 1 + %238 = icmp eq i32 %237, 6 + br i1 %238, label %239, label %224, !llvm.loop !13 + +239: ; preds = %224 + %240 = lshr i64 %236, 14 + %241 = trunc i64 %240 to i32 + store i32 %241, i32* @szh, align 4, !tbaa !8 + %242 = load i32, i32* @rh1, align 4, !tbaa !8 + %243 = load i32, i32* @ah1, align 4, !tbaa !8 + %244 = load i32, i32* @rh2, align 4, !tbaa !8 + %245 = load i32, i32* @ah2, align 4, !tbaa !8 + %246 = shl nsw i32 %242, 1 + %247 = sext i32 %246 to i64 + %248 = sext i32 %243 to i64 + %249 = mul nsw i64 %248, %247 + %250 = shl nsw i32 %244, 1 + %251 = sext i32 %250 to i64 + %252 = sext i32 %245 to i64 + %253 = mul nsw i64 %252, %251 + %254 = add nsw i64 %253, %249 + %255 = lshr i64 %254, 15 + %256 = trunc i64 %255 to i32 + store i32 %256, i32* @sph, align 4, !tbaa !8 + %257 = add nsw i32 %256, %241 + store i32 %257, i32* @sh, align 4, !tbaa !8 + %258 = sub nsw i32 %55, %257 + store i32 %258, i32* @eh, align 4, !tbaa !8 + %259 = icmp sgt i32 %258, -1 + %260 = select i1 %259, i32 3, i32 1 + store i32 %260, i32* @ih, align 4, !tbaa !8 + %261 = load i32, i32* @deth, align 4, !tbaa !8 + %262 = sext i32 %261 to i64 + %263 = mul nsw i64 %262, 564 + %264 = ashr i64 %263, 12 + %265 = trunc i64 %264 to i32 + %266 = call i32 @llvm.abs.i32(i32 %258, i1 true) #11 + %267 = icmp sgt i32 %266, %265 + br i1 %267, label %268, label %270 + +268: ; preds = %239 + %269 = add nsw i32 %260, -1 + store i32 %269, i32* @ih, align 4, !tbaa !8 + br label %270 + +270: ; preds = %268, %239 + %271 = load i32, i32* @ih, align 4, !tbaa !8 + %272 = sext i32 %271 to i64 + %273 = getelementptr inbounds [4 x i32], [4 x i32]* @qq2_code2_table, i64 0, i64 %272 + %274 = load i32, i32* %273, align 4, !tbaa !8 + %275 = sext i32 %274 to i64 + %276 = mul nsw i64 %275, %262 + %277 = lshr i64 %276, 15 + %278 = trunc i64 %277 to i32 + store i32 %278, i32* @dh, align 4, !tbaa !8 + %279 = load i32, i32* @nbh, align 4, !tbaa !8 + %280 = sext i32 %279 to i64 + %281 = mul nsw i64 %280, 127 + %282 = lshr i64 %281, 7 + %283 = trunc i64 %282 to i32 + %284 = getelementptr inbounds [4 x i32], [4 x i32]* @wh_code_table, i64 0, i64 %272 + %285 = load i32, i32* %284, align 4, !tbaa !8 + %286 = add nsw i32 %285, %283 + %287 = icmp sgt i32 %286, 0 + %288 = select i1 %287, i32 %286, i32 0 + %289 = icmp slt i32 %288, 22528 + %290 = select i1 %289, i32 %288, i32 22528 + store 
i32 %290, i32* @nbh, align 4, !tbaa !8 + %291 = lshr i32 %290, 6 + %292 = and i32 %291, 31 + %293 = lshr i32 %290, 11 + %294 = zext i32 %292 to i64 + %295 = getelementptr inbounds [32 x i32], [32 x i32]* @ilb_table, i64 0, i64 %294 + %296 = load i32, i32* %295, align 4, !tbaa !8 + %297 = sub nsw i32 11, %293 + %298 = ashr i32 %296, %297 + %299 = shl i32 %298, 3 + store i32 %299, i32* @deth, align 4, !tbaa !8 + %300 = add nsw i32 %278, %241 + store i32 %300, i32* @ph, align 4, !tbaa !8 + %301 = icmp eq i32 %278, 0 + br i1 %301, label %305, label %302 + +302: ; preds = %270 + %303 = shl i64 %277, 32 + %304 = ashr exact i64 %303, 32 + br label %315 + +305: ; preds = %270, %305 + %306 = phi i64 [ %313, %305 ], [ 0, %270 ] + %307 = getelementptr inbounds [6 x i32], [6 x i32]* @delay_bph, i64 0, i64 %306 + %308 = load i32, i32* %307, align 4, !tbaa !8 + %309 = sext i32 %308 to i64 + %310 = mul nsw i64 %309, 255 + %311 = lshr i64 %310, 8 + %312 = trunc i64 %311 to i32 + store i32 %312, i32* %307, align 4, !tbaa !8 + %313 = add nuw nsw i64 %306, 1 + %314 = icmp eq i64 %313, 6 + br i1 %314, label %332, label %305, !llvm.loop !15 + +315: ; preds = %315, %302 + %316 = phi i64 [ 0, %302 ], [ %330, %315 ] + %317 = getelementptr inbounds [6 x i32], [6 x i32]* @delay_dhx, i64 0, i64 %316 + %318 = load i32, i32* %317, align 4, !tbaa !8 + %319 = sext i32 %318 to i64 + %320 = mul nsw i64 %304, %319 + %321 = icmp sgt i64 %320, -1 + %322 = select i1 %321, i32 128, i32 -128 + %323 = getelementptr inbounds [6 x i32], [6 x i32]* @delay_bph, i64 0, i64 %316 + %324 = load i32, i32* %323, align 4, !tbaa !8 + %325 = sext i32 %324 to i64 + %326 = mul nsw i64 %325, 255 + %327 = lshr i64 %326, 8 + %328 = trunc i64 %327 to i32 + %329 = add nsw i32 %322, %328 + store i32 %329, i32* %323, align 4, !tbaa !8 + %330 = add nuw nsw i64 %316, 1 + %331 = icmp eq i64 %330, 6 + br i1 %331, label %332, label %315, !llvm.loop !16 + +332: ; preds = %315, %305 + %333 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @delay_dhx, i64 0, i64 4), align 16, !tbaa !8 + store i32 %333, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @delay_dhx, i64 0, i64 5), align 4, !tbaa !8 + %334 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @delay_dhx, i64 0, i64 3), align 4, !tbaa !8 + store i32 %334, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @delay_dhx, i64 0, i64 4), align 16, !tbaa !8 + %335 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @delay_dhx, i64 0, i64 2), align 8, !tbaa !8 + store i32 %335, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @delay_dhx, i64 0, i64 3), align 4, !tbaa !8 + store i32 %221, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @delay_dhx, i64 0, i64 1), align 4, !tbaa !8 + store i32 %278, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @delay_dhx, i64 0, i64 0), align 16, !tbaa !8 + %336 = load i32, i32* @ph1, align 4, !tbaa !8 + %337 = load i32, i32* @ph2, align 4, !tbaa !8 + %338 = shl nsw i64 %248, 2 + %339 = sext i32 %300 to i64 + %340 = sext i32 %336 to i64 + %341 = mul nsw i64 %340, %339 + %342 = icmp sgt i64 %341, -1 + %343 = sub nsw i64 0, %338 + %344 = select i1 %342, i64 %343, i64 %338 + %345 = lshr i64 %344, 7 + %346 = sext i32 %337 to i64 + %347 = mul nsw i64 %346, %339 + %348 = icmp sgt i64 %347, -1 + %349 = select i1 %348, i64 128, i64 4294967168 + %350 = mul nsw i64 %252, 127 + %351 = lshr i64 %350, 7 + %352 = add nuw nsw i64 %349, %351 + %353 = add nuw nsw i64 %352, %345 + %354 = trunc i64 %353 to i32 + %355 = icmp slt i32 %354, 12288 + 
%356 = select i1 %355, i32 %354, i32 12288 + %357 = icmp sgt i32 %356, -12288 + %358 = select i1 %357, i32 %356, i32 -12288 + store i32 %358, i32* @ah2, align 4, !tbaa !8 + %359 = mul nsw i64 %248, 255 + %360 = lshr i64 %359, 8 + %361 = trunc i64 %360 to i32 + %362 = select i1 %342, i32 192, i32 -192 + %363 = add nsw i32 %362, %361 + %364 = add nsw i32 %358, -15360 + %365 = sub nsw i32 15360, %358 + %366 = icmp sgt i32 %363, %365 + %367 = select i1 %366, i32 %365, i32 %363 + %368 = icmp slt i32 %367, %364 + %369 = select i1 %368, i32 %364, i32 %367 + store i32 %369, i32* @ah1, align 4, !tbaa !8 + %370 = add nsw i32 %257, %278 + store i32 %370, i32* @yh, align 4, !tbaa !8 + store i32 %242, i32* @rh2, align 4, !tbaa !8 + store i32 %370, i32* @rh1, align 4, !tbaa !8 + store i32 %336, i32* @ph2, align 4, !tbaa !8 + store i32 %300, i32* @ph1, align 4, !tbaa !8 + %371 = shl i32 %271, 6 + %372 = or i32 %371, %118 + ret i32 %372 +} + +; Function Attrs: nofree norecurse nosync nounwind readonly sspstrong uwtable +define dso_local i32 @filtez(i32* nocapture readonly %0, i32* nocapture readonly %1) local_unnamed_addr #3 { + %3 = load i32, i32* %0, align 4, !tbaa !8 + %4 = sext i32 %3 to i64 + %5 = load i32, i32* %1, align 4, !tbaa !8 + %6 = sext i32 %5 to i64 + %7 = mul nsw i64 %6, %4 + br label %8 + +8: ; preds = %2, %8 + %9 = phi i32* [ %0, %2 ], [ %14, %8 ] + %10 = phi i32* [ %1, %2 ], [ %13, %8 ] + %11 = phi i64 [ %7, %2 ], [ %20, %8 ] + %12 = phi i32 [ 1, %2 ], [ %21, %8 ] + %13 = getelementptr inbounds i32, i32* %10, i64 1 + %14 = getelementptr inbounds i32, i32* %9, i64 1 + %15 = load i32, i32* %14, align 4, !tbaa !8 + %16 = sext i32 %15 to i64 + %17 = load i32, i32* %13, align 4, !tbaa !8 + %18 = sext i32 %17 to i64 + %19 = mul nsw i64 %18, %16 + %20 = add nsw i64 %19, %11 + %21 = add nuw nsw i32 %12, 1 + %22 = icmp eq i32 %21, 6 + br i1 %22, label %23, label %8, !llvm.loop !13 + +23: ; preds = %8 + %24 = lshr i64 %20, 14 + %25 = trunc i64 %24 to i32 + ret i32 %25 +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone sspstrong uwtable willreturn +define dso_local i32 @filtep(i32 %0, i32 %1, i32 %2, i32 %3) local_unnamed_addr #0 { + %5 = shl nsw i32 %0, 1 + %6 = sext i32 %5 to i64 + %7 = sext i32 %1 to i64 + %8 = mul nsw i64 %7, %6 + %9 = shl nsw i32 %2, 1 + %10 = sext i32 %9 to i64 + %11 = sext i32 %3 to i64 + %12 = mul nsw i64 %11, %10 + %13 = add nsw i64 %12, %8 + %14 = lshr i64 %13, 15 + %15 = trunc i64 %14 to i32 + ret i32 %15 +} + +; Function Attrs: nofree nosync nounwind readonly sspstrong uwtable +define dso_local i32 @quantl(i32 %0, i32 %1) local_unnamed_addr #4 { + %3 = call i32 @llvm.abs.i32(i32 %0, i1 true) #11 + %4 = zext i32 %3 to i64 + %5 = sext i32 %1 to i64 + br label %6 + +6: ; preds = %2, %14 + %7 = phi i64 [ 0, %2 ], [ %15, %14 ] + %8 = getelementptr inbounds [30 x i32], [30 x i32]* @decis_levl, i64 0, i64 %7 + %9 = load i32, i32* %8, align 4, !tbaa !8 + %10 = sext i32 %9 to i64 + %11 = mul nsw i64 %10, %5 + %12 = ashr i64 %11, 15 + %13 = icmp slt i64 %12, %4 + br i1 %13, label %14, label %17 + +14: ; preds = %6 + %15 = add nuw nsw i64 %7, 1 + %16 = icmp eq i64 %15, 30 + br i1 %16, label %17, label %6, !llvm.loop !14 + +17: ; preds = %6, %14 + %18 = phi i64 [ %7, %6 ], [ 30, %14 ] + %19 = icmp sgt i32 %0, -1 + %20 = and i64 %18, 4294967295 + %21 = getelementptr inbounds [31 x i32], [31 x i32]* @quant26bt_pos, i64 0, i64 %20 + %22 = getelementptr inbounds [31 x i32], [31 x i32]* @quant26bt_neg, i64 0, i64 %20 + %23 = select i1 %19, i32* %21, i32* %22 
+ %24 = load i32, i32* %23, align 4, !tbaa !8 + ret i32 %24 +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind readonly sspstrong uwtable willreturn +define dso_local i32 @logscl(i32 %0, i32 %1) local_unnamed_addr #5 { + %3 = sext i32 %1 to i64 + %4 = mul nsw i64 %3, 127 + %5 = lshr i64 %4, 7 + %6 = trunc i64 %5 to i32 + %7 = ashr i32 %0, 2 + %8 = sext i32 %7 to i64 + %9 = getelementptr inbounds [16 x i32], [16 x i32]* @wl_code_table, i64 0, i64 %8 + %10 = load i32, i32* %9, align 4, !tbaa !8 + %11 = add nsw i32 %10, %6 + %12 = icmp sgt i32 %11, 0 + %13 = select i1 %12, i32 %11, i32 0 + %14 = icmp slt i32 %13, 18432 + %15 = select i1 %14, i32 %13, i32 18432 + ret i32 %15 +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind readonly sspstrong uwtable willreturn +define dso_local i32 @scalel(i32 %0, i32 %1) local_unnamed_addr #5 { + %3 = lshr i32 %0, 6 + %4 = and i32 %3, 31 + %5 = ashr i32 %0, 11 + %6 = zext i32 %4 to i64 + %7 = getelementptr inbounds [32 x i32], [32 x i32]* @ilb_table, i64 0, i64 %6 + %8 = load i32, i32* %7, align 4, !tbaa !8 + %9 = add nsw i32 %1, 1 + %10 = sub i32 %9, %5 + %11 = ashr i32 %8, %10 + %12 = shl i32 %11, 3 + ret i32 %12 +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local void @upzero(i32 %0, i32* nocapture %1, i32* nocapture %2) local_unnamed_addr #6 { + %4 = icmp eq i32 %0, 0 + br i1 %4, label %7, label %5 + +5: ; preds = %3 + %6 = sext i32 %0 to i64 + br label %17 + +7: ; preds = %3, %7 + %8 = phi i64 [ %15, %7 ], [ 0, %3 ] + %9 = getelementptr inbounds i32, i32* %2, i64 %8 + %10 = load i32, i32* %9, align 4, !tbaa !8 + %11 = sext i32 %10 to i64 + %12 = mul nsw i64 %11, 255 + %13 = lshr i64 %12, 8 + %14 = trunc i64 %13 to i32 + store i32 %14, i32* %9, align 4, !tbaa !8 + %15 = add nuw nsw i64 %8, 1 + %16 = icmp eq i64 %15, 6 + br i1 %16, label %34, label %7, !llvm.loop !15 + +17: ; preds = %5, %17 + %18 = phi i64 [ 0, %5 ], [ %32, %17 ] + %19 = getelementptr inbounds i32, i32* %1, i64 %18 + %20 = load i32, i32* %19, align 4, !tbaa !8 + %21 = sext i32 %20 to i64 + %22 = mul nsw i64 %21, %6 + %23 = icmp sgt i64 %22, -1 + %24 = select i1 %23, i32 128, i32 -128 + %25 = getelementptr inbounds i32, i32* %2, i64 %18 + %26 = load i32, i32* %25, align 4, !tbaa !8 + %27 = sext i32 %26 to i64 + %28 = mul nsw i64 %27, 255 + %29 = lshr i64 %28, 8 + %30 = trunc i64 %29 to i32 + %31 = add nsw i32 %24, %30 + store i32 %31, i32* %25, align 4, !tbaa !8 + %32 = add nuw nsw i64 %18, 1 + %33 = icmp eq i64 %32, 6 + br i1 %33, label %34, label %17, !llvm.loop !16 + +34: ; preds = %17, %7 + %35 = getelementptr inbounds i32, i32* %1, i64 4 + %36 = load i32, i32* %35, align 4, !tbaa !8 + %37 = getelementptr inbounds i32, i32* %1, i64 5 + store i32 %36, i32* %37, align 4, !tbaa !8 + %38 = getelementptr inbounds i32, i32* %1, i64 3 + %39 = load i32, i32* %38, align 4, !tbaa !8 + store i32 %39, i32* %35, align 4, !tbaa !8 + %40 = getelementptr inbounds i32, i32* %1, i64 2 + %41 = load i32, i32* %40, align 4, !tbaa !8 + store i32 %41, i32* %38, align 4, !tbaa !8 + %42 = load i32, i32* %1, align 4, !tbaa !8 + %43 = getelementptr inbounds i32, i32* %1, i64 1 + store i32 %42, i32* %43, align 4, !tbaa !8 + store i32 %0, i32* %1, align 4, !tbaa !8 + ret void +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone sspstrong uwtable willreturn +define dso_local i32 @uppol2(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4) local_unnamed_addr #0 { + %6 = sext i32 %0 to i64 + %7 = shl nsw i64 %6, 2 + %8 = 
sext i32 %2 to i64 + %9 = sext i32 %3 to i64 + %10 = mul nsw i64 %9, %8 + %11 = icmp sgt i64 %10, -1 + %12 = sub nsw i64 0, %7 + %13 = select i1 %11, i64 %12, i64 %7 + %14 = lshr i64 %13, 7 + %15 = sext i32 %4 to i64 + %16 = mul nsw i64 %15, %8 + %17 = icmp sgt i64 %16, -1 + %18 = select i1 %17, i64 128, i64 4294967168 + %19 = sext i32 %1 to i64 + %20 = mul nsw i64 %19, 127 + %21 = lshr i64 %20, 7 + %22 = add nuw nsw i64 %18, %21 + %23 = add nuw nsw i64 %22, %14 + %24 = trunc i64 %23 to i32 + %25 = icmp slt i32 %24, 12288 + %26 = select i1 %25, i32 %24, i32 12288 + %27 = icmp sgt i32 %26, -12288 + %28 = select i1 %27, i32 %26, i32 -12288 + ret i32 %28 +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone sspstrong uwtable willreturn +define dso_local i32 @uppol1(i32 %0, i32 %1, i32 %2, i32 %3) local_unnamed_addr #0 { + %5 = sext i32 %0 to i64 + %6 = mul nsw i64 %5, 255 + %7 = lshr i64 %6, 8 + %8 = sext i32 %2 to i64 + %9 = sext i32 %3 to i64 + %10 = mul nsw i64 %9, %8 + %11 = icmp sgt i64 %10, -1 + %12 = trunc i64 %7 to i32 + %13 = select i1 %11, i32 192, i32 -192 + %14 = add nsw i32 %13, %12 + %15 = add i32 %1, -15360 + %16 = sub nsw i32 15360, %1 + %17 = icmp sgt i32 %14, %16 + %18 = select i1 %17, i32 %16, i32 %14 + %19 = icmp slt i32 %18, %15 + %20 = select i1 %19, i32 %15, i32 %18 + ret i32 %20 +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind readonly sspstrong uwtable willreturn +define dso_local i32 @logsch(i32 %0, i32 %1) local_unnamed_addr #5 { + %3 = sext i32 %1 to i64 + %4 = mul nsw i64 %3, 127 + %5 = lshr i64 %4, 7 + %6 = trunc i64 %5 to i32 + %7 = sext i32 %0 to i64 + %8 = getelementptr inbounds [4 x i32], [4 x i32]* @wh_code_table, i64 0, i64 %7 + %9 = load i32, i32* %8, align 4, !tbaa !8 + %10 = add nsw i32 %9, %6 + %11 = icmp sgt i32 %10, 0 + %12 = select i1 %11, i32 %10, i32 0 + %13 = icmp slt i32 %12, 22528 + %14 = select i1 %13, i32 %12, i32 22528 + ret i32 %14 +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local void @decode(i32 %0) local_unnamed_addr #6 { + %2 = and i32 %0, 63 + store i32 %2, i32* @ilr, align 4, !tbaa !8 + %3 = ashr i32 %0, 6 + store i32 %3, i32* @ih, align 4, !tbaa !8 + %4 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @dec_del_bpl, i64 0, i64 0), align 16, !tbaa !8 + %5 = sext i32 %4 to i64 + %6 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @dec_del_dltx, i64 0, i64 0), align 16, !tbaa !8 + %7 = sext i32 %6 to i64 + %8 = mul nsw i64 %7, %5 + br label %9 + +9: ; preds = %9, %1 + %10 = phi i32* [ getelementptr inbounds ([6 x i32], [6 x i32]* @dec_del_bpl, i64 0, i64 0), %1 ], [ %15, %9 ] + %11 = phi i32* [ getelementptr inbounds ([6 x i32], [6 x i32]* @dec_del_dltx, i64 0, i64 0), %1 ], [ %14, %9 ] + %12 = phi i64 [ %8, %1 ], [ %21, %9 ] + %13 = phi i32 [ 1, %1 ], [ %22, %9 ] + %14 = getelementptr inbounds i32, i32* %11, i64 1 + %15 = getelementptr inbounds i32, i32* %10, i64 1 + %16 = load i32, i32* %15, align 4, !tbaa !8 + %17 = sext i32 %16 to i64 + %18 = load i32, i32* %14, align 4, !tbaa !8 + %19 = sext i32 %18 to i64 + %20 = mul nsw i64 %19, %17 + %21 = add nsw i64 %20, %12 + %22 = add nuw nsw i32 %13, 1 + %23 = icmp eq i32 %22, 6 + br i1 %23, label %24, label %9, !llvm.loop !13 + +24: ; preds = %9 + %25 = lshr i64 %21, 14 + %26 = trunc i64 %25 to i32 + store i32 %26, i32* @dec_szl, align 4, !tbaa !8 + %27 = load i32, i32* @dec_rlt1, align 4, !tbaa !8 + %28 = load i32, i32* @dec_al1, align 4, !tbaa !8 + %29 = load i32, i32* 
@dec_rlt2, align 4, !tbaa !8 + %30 = load i32, i32* @dec_al2, align 4, !tbaa !8 + %31 = shl nsw i32 %27, 1 + %32 = sext i32 %31 to i64 + %33 = sext i32 %28 to i64 + %34 = mul nsw i64 %33, %32 + %35 = shl nsw i32 %29, 1 + %36 = sext i32 %35 to i64 + %37 = sext i32 %30 to i64 + %38 = mul nsw i64 %37, %36 + %39 = add nsw i64 %38, %34 + %40 = lshr i64 %39, 15 + %41 = trunc i64 %40 to i32 + store i32 %41, i32* @dec_spl, align 4, !tbaa !8 + %42 = add nsw i32 %41, %26 + store i32 %42, i32* @dec_sl, align 4, !tbaa !8 + %43 = load i32, i32* @dec_detl, align 4, !tbaa !8 + %44 = sext i32 %43 to i64 + %45 = lshr i32 %2, 2 + %46 = zext i32 %45 to i64 + %47 = getelementptr inbounds [16 x i32], [16 x i32]* @qq4_code4_table, i64 0, i64 %46 + %48 = load i32, i32* %47, align 4, !tbaa !8 + %49 = sext i32 %48 to i64 + %50 = mul nsw i64 %49, %44 + %51 = lshr i64 %50, 15 + %52 = trunc i64 %51 to i32 + store i32 %52, i32* @dec_dlt, align 4, !tbaa !8 + %53 = load i32, i32* @il, align 4, !tbaa !8 + %54 = sext i32 %53 to i64 + %55 = getelementptr inbounds [64 x i32], [64 x i32]* @qq6_code6_table, i64 0, i64 %54 + %56 = load i32, i32* %55, align 4, !tbaa !8 + %57 = sext i32 %56 to i64 + %58 = mul nsw i64 %57, %44 + %59 = lshr i64 %58, 15 + %60 = trunc i64 %59 to i32 + store i32 %60, i32* @dl, align 4, !tbaa !8 + %61 = add nsw i32 %42, %60 + store i32 %61, i32* @rl, align 4, !tbaa !8 + %62 = load i32, i32* @dec_nbl, align 4, !tbaa !8 + %63 = sext i32 %62 to i64 + %64 = mul nsw i64 %63, 127 + %65 = lshr i64 %64, 7 + %66 = trunc i64 %65 to i32 + %67 = getelementptr inbounds [16 x i32], [16 x i32]* @wl_code_table, i64 0, i64 %46 + %68 = load i32, i32* %67, align 4, !tbaa !8 + %69 = add nsw i32 %68, %66 + %70 = icmp sgt i32 %69, 0 + %71 = select i1 %70, i32 %69, i32 0 + %72 = icmp slt i32 %71, 18432 + %73 = select i1 %72, i32 %71, i32 18432 + store i32 %73, i32* @dec_nbl, align 4, !tbaa !8 + %74 = lshr i32 %73, 6 + %75 = and i32 %74, 31 + %76 = lshr i32 %73, 11 + %77 = zext i32 %75 to i64 + %78 = getelementptr inbounds [32 x i32], [32 x i32]* @ilb_table, i64 0, i64 %77 + %79 = load i32, i32* %78, align 4, !tbaa !8 + %80 = sub nsw i32 9, %76 + %81 = ashr i32 %79, %80 + %82 = shl i32 %81, 3 + store i32 %82, i32* @dec_detl, align 4, !tbaa !8 + %83 = add nsw i32 %52, %26 + store i32 %83, i32* @dec_plt, align 4, !tbaa !8 + %84 = icmp eq i32 %52, 0 + br i1 %84, label %88, label %85 + +85: ; preds = %24 + %86 = shl i64 %51, 32 + %87 = ashr exact i64 %86, 32 + br label %98 + +88: ; preds = %24, %88 + %89 = phi i64 [ %96, %88 ], [ 0, %24 ] + %90 = getelementptr inbounds [6 x i32], [6 x i32]* @dec_del_bpl, i64 0, i64 %89 + %91 = load i32, i32* %90, align 4, !tbaa !8 + %92 = sext i32 %91 to i64 + %93 = mul nsw i64 %92, 255 + %94 = lshr i64 %93, 8 + %95 = trunc i64 %94 to i32 + store i32 %95, i32* %90, align 4, !tbaa !8 + %96 = add nuw nsw i64 %89, 1 + %97 = icmp eq i64 %96, 6 + br i1 %97, label %115, label %88, !llvm.loop !15 + +98: ; preds = %98, %85 + %99 = phi i64 [ 0, %85 ], [ %113, %98 ] + %100 = getelementptr inbounds [6 x i32], [6 x i32]* @dec_del_dltx, i64 0, i64 %99 + %101 = load i32, i32* %100, align 4, !tbaa !8 + %102 = sext i32 %101 to i64 + %103 = mul nsw i64 %87, %102 + %104 = icmp sgt i64 %103, -1 + %105 = select i1 %104, i32 128, i32 -128 + %106 = getelementptr inbounds [6 x i32], [6 x i32]* @dec_del_bpl, i64 0, i64 %99 + %107 = load i32, i32* %106, align 4, !tbaa !8 + %108 = sext i32 %107 to i64 + %109 = mul nsw i64 %108, 255 + %110 = lshr i64 %109, 8 + %111 = trunc i64 %110 to i32 + %112 = add nsw i32 %105, %111 + 
store i32 %112, i32* %106, align 4, !tbaa !8 + %113 = add nuw nsw i64 %99, 1 + %114 = icmp eq i64 %113, 6 + br i1 %114, label %115, label %98, !llvm.loop !16 + +115: ; preds = %98, %88 + %116 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @dec_del_dltx, i64 0, i64 4), align 16, !tbaa !8 + store i32 %116, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @dec_del_dltx, i64 0, i64 5), align 4, !tbaa !8 + %117 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @dec_del_dltx, i64 0, i64 3), align 4, !tbaa !8 + store i32 %117, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @dec_del_dltx, i64 0, i64 4), align 16, !tbaa !8 + %118 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @dec_del_dltx, i64 0, i64 2), align 8, !tbaa !8 + store i32 %118, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @dec_del_dltx, i64 0, i64 3), align 4, !tbaa !8 + store i32 %6, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @dec_del_dltx, i64 0, i64 1), align 4, !tbaa !8 + store i32 %52, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @dec_del_dltx, i64 0, i64 0), align 16, !tbaa !8 + %119 = load i32, i32* @dec_plt1, align 4, !tbaa !8 + %120 = load i32, i32* @dec_plt2, align 4, !tbaa !8 + %121 = shl nsw i64 %33, 2 + %122 = sext i32 %83 to i64 + %123 = sext i32 %119 to i64 + %124 = mul nsw i64 %123, %122 + %125 = icmp sgt i64 %124, -1 + %126 = sub nsw i64 0, %121 + %127 = select i1 %125, i64 %126, i64 %121 + %128 = lshr i64 %127, 7 + %129 = sext i32 %120 to i64 + %130 = mul nsw i64 %129, %122 + %131 = icmp sgt i64 %130, -1 + %132 = select i1 %131, i64 128, i64 4294967168 + %133 = mul nsw i64 %37, 127 + %134 = lshr i64 %133, 7 + %135 = add nuw nsw i64 %132, %134 + %136 = add nuw nsw i64 %135, %128 + %137 = trunc i64 %136 to i32 + %138 = icmp slt i32 %137, 12288 + %139 = select i1 %138, i32 %137, i32 12288 + %140 = icmp sgt i32 %139, -12288 + %141 = select i1 %140, i32 %139, i32 -12288 + store i32 %141, i32* @dec_al2, align 4, !tbaa !8 + %142 = mul nsw i64 %33, 255 + %143 = lshr i64 %142, 8 + %144 = trunc i64 %143 to i32 + %145 = select i1 %125, i32 192, i32 -192 + %146 = add nsw i32 %145, %144 + %147 = add nsw i32 %141, -15360 + %148 = sub nsw i32 15360, %141 + %149 = icmp sgt i32 %146, %148 + %150 = select i1 %149, i32 %148, i32 %146 + %151 = icmp slt i32 %150, %147 + %152 = select i1 %151, i32 %147, i32 %150 + store i32 %152, i32* @dec_al1, align 4, !tbaa !8 + %153 = add nsw i32 %42, %52 + store i32 %153, i32* @dec_rlt, align 4, !tbaa !8 + store i32 %27, i32* @dec_rlt2, align 4, !tbaa !8 + store i32 %153, i32* @dec_rlt1, align 4, !tbaa !8 + store i32 %119, i32* @dec_plt2, align 4, !tbaa !8 + store i32 %83, i32* @dec_plt1, align 4, !tbaa !8 + %154 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @dec_del_bph, i64 0, i64 0), align 16, !tbaa !8 + %155 = sext i32 %154 to i64 + %156 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @dec_del_dhx, i64 0, i64 0), align 16, !tbaa !8 + %157 = sext i32 %156 to i64 + %158 = mul nsw i64 %157, %155 + br label %159 + +159: ; preds = %159, %115 + %160 = phi i32* [ getelementptr inbounds ([6 x i32], [6 x i32]* @dec_del_bph, i64 0, i64 0), %115 ], [ %165, %159 ] + %161 = phi i32* [ getelementptr inbounds ([6 x i32], [6 x i32]* @dec_del_dhx, i64 0, i64 0), %115 ], [ %164, %159 ] + %162 = phi i64 [ %158, %115 ], [ %171, %159 ] + %163 = phi i32 [ 1, %115 ], [ %172, %159 ] + %164 = getelementptr inbounds i32, i32* %161, i64 1 + %165 = getelementptr inbounds i32, i32* %160, i64 1 + %166 = load i32, i32* %165, 
align 4, !tbaa !8 + %167 = sext i32 %166 to i64 + %168 = load i32, i32* %164, align 4, !tbaa !8 + %169 = sext i32 %168 to i64 + %170 = mul nsw i64 %169, %167 + %171 = add nsw i64 %170, %162 + %172 = add nuw nsw i32 %163, 1 + %173 = icmp eq i32 %172, 6 + br i1 %173, label %174, label %159, !llvm.loop !13 + +174: ; preds = %159 + %175 = lshr i64 %171, 14 + %176 = trunc i64 %175 to i32 + store i32 %176, i32* @dec_szh, align 4, !tbaa !8 + %177 = load i32, i32* @dec_rh1, align 4, !tbaa !8 + %178 = load i32, i32* @dec_ah1, align 4, !tbaa !8 + %179 = load i32, i32* @dec_rh2, align 4, !tbaa !8 + %180 = load i32, i32* @dec_ah2, align 4, !tbaa !8 + %181 = shl nsw i32 %177, 1 + %182 = sext i32 %181 to i64 + %183 = sext i32 %178 to i64 + %184 = mul nsw i64 %183, %182 + %185 = shl nsw i32 %179, 1 + %186 = sext i32 %185 to i64 + %187 = sext i32 %180 to i64 + %188 = mul nsw i64 %187, %186 + %189 = add nsw i64 %188, %184 + %190 = lshr i64 %189, 15 + %191 = trunc i64 %190 to i32 + store i32 %191, i32* @dec_sph, align 4, !tbaa !8 + %192 = add nsw i32 %191, %176 + store i32 %192, i32* @dec_sh, align 4, !tbaa !8 + %193 = load i32, i32* @dec_deth, align 4, !tbaa !8 + %194 = sext i32 %193 to i64 + %195 = sext i32 %3 to i64 + %196 = getelementptr inbounds [4 x i32], [4 x i32]* @qq2_code2_table, i64 0, i64 %195 + %197 = load i32, i32* %196, align 4, !tbaa !8 + %198 = sext i32 %197 to i64 + %199 = mul nsw i64 %198, %194 + %200 = lshr i64 %199, 15 + %201 = trunc i64 %200 to i32 + store i32 %201, i32* @dec_dh, align 4, !tbaa !8 + %202 = load i32, i32* @dec_nbh, align 4, !tbaa !8 + %203 = sext i32 %202 to i64 + %204 = mul nsw i64 %203, 127 + %205 = lshr i64 %204, 7 + %206 = trunc i64 %205 to i32 + %207 = getelementptr inbounds [4 x i32], [4 x i32]* @wh_code_table, i64 0, i64 %195 + %208 = load i32, i32* %207, align 4, !tbaa !8 + %209 = add nsw i32 %208, %206 + %210 = icmp sgt i32 %209, 0 + %211 = select i1 %210, i32 %209, i32 0 + %212 = icmp slt i32 %211, 22528 + %213 = select i1 %212, i32 %211, i32 22528 + store i32 %213, i32* @dec_nbh, align 4, !tbaa !8 + %214 = lshr i32 %213, 6 + %215 = and i32 %214, 31 + %216 = lshr i32 %213, 11 + %217 = zext i32 %215 to i64 + %218 = getelementptr inbounds [32 x i32], [32 x i32]* @ilb_table, i64 0, i64 %217 + %219 = load i32, i32* %218, align 4, !tbaa !8 + %220 = sub nsw i32 11, %216 + %221 = ashr i32 %219, %220 + %222 = shl i32 %221, 3 + store i32 %222, i32* @dec_deth, align 4, !tbaa !8 + %223 = add nsw i32 %201, %176 + store i32 %223, i32* @dec_ph, align 4, !tbaa !8 + %224 = icmp eq i32 %201, 0 + br i1 %224, label %228, label %225 + +225: ; preds = %174 + %226 = shl i64 %200, 32 + %227 = ashr exact i64 %226, 32 + br label %238 + +228: ; preds = %174, %228 + %229 = phi i64 [ %236, %228 ], [ 0, %174 ] + %230 = getelementptr inbounds [6 x i32], [6 x i32]* @dec_del_bph, i64 0, i64 %229 + %231 = load i32, i32* %230, align 4, !tbaa !8 + %232 = sext i32 %231 to i64 + %233 = mul nsw i64 %232, 255 + %234 = lshr i64 %233, 8 + %235 = trunc i64 %234 to i32 + store i32 %235, i32* %230, align 4, !tbaa !8 + %236 = add nuw nsw i64 %229, 1 + %237 = icmp eq i64 %236, 6 + br i1 %237, label %255, label %228, !llvm.loop !15 + +238: ; preds = %238, %225 + %239 = phi i64 [ 0, %225 ], [ %253, %238 ] + %240 = getelementptr inbounds [6 x i32], [6 x i32]* @dec_del_dhx, i64 0, i64 %239 + %241 = load i32, i32* %240, align 4, !tbaa !8 + %242 = sext i32 %241 to i64 + %243 = mul nsw i64 %227, %242 + %244 = icmp sgt i64 %243, -1 + %245 = select i1 %244, i32 128, i32 -128 + %246 = getelementptr inbounds [6 x 
i32], [6 x i32]* @dec_del_bph, i64 0, i64 %239 + %247 = load i32, i32* %246, align 4, !tbaa !8 + %248 = sext i32 %247 to i64 + %249 = mul nsw i64 %248, 255 + %250 = lshr i64 %249, 8 + %251 = trunc i64 %250 to i32 + %252 = add nsw i32 %245, %251 + store i32 %252, i32* %246, align 4, !tbaa !8 + %253 = add nuw nsw i64 %239, 1 + %254 = icmp eq i64 %253, 6 + br i1 %254, label %255, label %238, !llvm.loop !16 + +255: ; preds = %238, %228 + %256 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @dec_del_dhx, i64 0, i64 4), align 16, !tbaa !8 + store i32 %256, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @dec_del_dhx, i64 0, i64 5), align 4, !tbaa !8 + %257 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @dec_del_dhx, i64 0, i64 3), align 4, !tbaa !8 + store i32 %257, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @dec_del_dhx, i64 0, i64 4), align 16, !tbaa !8 + %258 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @dec_del_dhx, i64 0, i64 2), align 8, !tbaa !8 + store i32 %258, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @dec_del_dhx, i64 0, i64 3), align 4, !tbaa !8 + store i32 %156, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @dec_del_dhx, i64 0, i64 1), align 4, !tbaa !8 + store i32 %201, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @dec_del_dhx, i64 0, i64 0), align 16, !tbaa !8 + %259 = load i32, i32* @dec_ph1, align 4, !tbaa !8 + %260 = load i32, i32* @dec_ph2, align 4, !tbaa !8 + %261 = shl nsw i64 %183, 2 + %262 = sext i32 %223 to i64 + %263 = sext i32 %259 to i64 + %264 = mul nsw i64 %263, %262 + %265 = icmp sgt i64 %264, -1 + %266 = sub nsw i64 0, %261 + %267 = select i1 %265, i64 %266, i64 %261 + %268 = lshr i64 %267, 7 + %269 = sext i32 %260 to i64 + %270 = mul nsw i64 %269, %262 + %271 = icmp sgt i64 %270, -1 + %272 = select i1 %271, i64 128, i64 4294967168 + %273 = mul nsw i64 %187, 127 + %274 = lshr i64 %273, 7 + %275 = add nuw nsw i64 %272, %274 + %276 = add nuw nsw i64 %275, %268 + %277 = trunc i64 %276 to i32 + %278 = icmp slt i32 %277, 12288 + %279 = select i1 %278, i32 %277, i32 12288 + %280 = icmp sgt i32 %279, -12288 + %281 = select i1 %280, i32 %279, i32 -12288 + store i32 %281, i32* @dec_ah2, align 4, !tbaa !8 + %282 = mul nsw i64 %183, 255 + %283 = lshr i64 %282, 8 + %284 = trunc i64 %283 to i32 + %285 = select i1 %265, i32 192, i32 -192 + %286 = add nsw i32 %285, %284 + %287 = add nsw i32 %281, -15360 + %288 = sub nsw i32 15360, %281 + %289 = icmp sgt i32 %286, %288 + %290 = select i1 %289, i32 %288, i32 %286 + %291 = icmp slt i32 %290, %287 + %292 = select i1 %291, i32 %287, i32 %290 + store i32 %292, i32* @dec_ah1, align 4, !tbaa !8 + %293 = add nsw i32 %192, %201 + store i32 %293, i32* @rh, align 4, !tbaa !8 + store i32 %177, i32* @dec_rh2, align 4, !tbaa !8 + store i32 %293, i32* @dec_rh1, align 4, !tbaa !8 + store i32 %259, i32* @dec_ph2, align 4, !tbaa !8 + store i32 %223, i32* @dec_ph1, align 4, !tbaa !8 + %294 = sub nsw i32 %61, %293 + store i32 %294, i32* @xd, align 4, !tbaa !8 + %295 = add nsw i32 %293, %61 + store i32 %295, i32* @xs, align 4, !tbaa !8 + %296 = sext i32 %294 to i64 + %297 = load i32, i32* getelementptr inbounds ([24 x i32], [24 x i32]* @h, i64 0, i64 0), align 16, !tbaa !8 + %298 = sext i32 %297 to i64 + %299 = mul nsw i64 %298, %296 + %300 = sext i32 %295 to i64 + %301 = load i32, i32* getelementptr inbounds ([24 x i32], [24 x i32]* @h, i64 0, i64 1), align 4, !tbaa !8 + %302 = sext i32 %301 to i64 + %303 = mul nsw i64 %302, %300 + br label %304 + +304: ; preds = %255, 
%304 + %305 = phi i32* [ getelementptr inbounds ([11 x i32], [11 x i32]* @accumd, i64 0, i64 0), %255 ], [ %319, %304 ] + %306 = phi i32* [ getelementptr inbounds ([11 x i32], [11 x i32]* @accumc, i64 0, i64 0), %255 ], [ %311, %304 ] + %307 = phi i32* [ getelementptr inbounds ([24 x i32], [24 x i32]* @h, i64 0, i64 2), %255 ], [ %322, %304 ] + %308 = phi i64 [ %303, %255 ], [ %326, %304 ] + %309 = phi i64 [ %299, %255 ], [ %318, %304 ] + %310 = phi i32 [ 0, %255 ], [ %327, %304 ] + %311 = getelementptr inbounds i32, i32* %306, i64 1 + %312 = load i32, i32* %306, align 4, !tbaa !8 + %313 = sext i32 %312 to i64 + %314 = getelementptr inbounds i32, i32* %307, i64 1 + %315 = load i32, i32* %307, align 4, !tbaa !8 + %316 = sext i32 %315 to i64 + %317 = mul nsw i64 %316, %313 + %318 = add nsw i64 %317, %309 + %319 = getelementptr inbounds i32, i32* %305, i64 1 + %320 = load i32, i32* %305, align 4, !tbaa !8 + %321 = sext i32 %320 to i64 + %322 = getelementptr inbounds i32, i32* %307, i64 2 + %323 = load i32, i32* %314, align 4, !tbaa !8 + %324 = sext i32 %323 to i64 + %325 = mul nsw i64 %324, %321 + %326 = add nsw i64 %325, %308 + %327 = add nuw nsw i32 %310, 1 + %328 = icmp eq i32 %327, 10 + br i1 %328, label %329, label %304, !llvm.loop !17 + +329: ; preds = %304 + %330 = load i32, i32* getelementptr inbounds ([11 x i32], [11 x i32]* @accumc, i64 0, i64 10), align 8, !tbaa !8 + %331 = sext i32 %330 to i64 + %332 = load i32, i32* getelementptr inbounds ([24 x i32], [24 x i32]* @h, i64 0, i64 22), align 8, !tbaa !8 + %333 = sext i32 %332 to i64 + %334 = mul nsw i64 %333, %331 + %335 = add nsw i64 %334, %318 + %336 = load i32, i32* getelementptr inbounds ([11 x i32], [11 x i32]* @accumd, i64 0, i64 10), align 8, !tbaa !8 + %337 = sext i32 %336 to i64 + %338 = load i32, i32* getelementptr inbounds ([24 x i32], [24 x i32]* @h, i64 0, i64 23), align 4, !tbaa !8 + %339 = sext i32 %338 to i64 + %340 = mul nsw i64 %339, %337 + %341 = add nsw i64 %340, %326 + %342 = lshr i64 %335, 14 + %343 = trunc i64 %342 to i32 + store i32 %343, i32* @xout1, align 4, !tbaa !8 + %344 = lshr i64 %341, 14 + %345 = trunc i64 %344 to i32 + store i32 %345, i32* @xout2, align 4, !tbaa !8 + call void @llvm.memmove.p0i8.p0i8.i64(i8* noundef nonnull align 4 dereferenceable(40) bitcast (i32* getelementptr inbounds ([11 x i32], [11 x i32]* @accumc, i64 0, i64 1) to i8*), i8* noundef nonnull align 16 dereferenceable(40) bitcast ([11 x i32]* @accumc to i8*), i64 40, i1 false) + call void @llvm.memmove.p0i8.p0i8.i64(i8* noundef nonnull align 4 dereferenceable(40) bitcast (i32* getelementptr inbounds ([11 x i32], [11 x i32]* @accumd, i64 0, i64 1) to i8*), i8* noundef nonnull align 16 dereferenceable(40) bitcast ([11 x i32]* @accumd to i8*), i64 40, i1 false) + store i32 %294, i32* getelementptr inbounds ([11 x i32], [11 x i32]* @accumc, i64 0, i64 0), align 16, !tbaa !8 + store i32 %295, i32* getelementptr inbounds ([11 x i32], [11 x i32]* @accumd, i64 0, i64 0), align 16, !tbaa !8 + ret void +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable writeonly +define dso_local void @reset() local_unnamed_addr #7 { + store i32 32, i32* @dec_detl, align 4, !tbaa !8 + store i32 32, i32* @detl, align 4, !tbaa !8 + store i32 8, i32* @dec_deth, align 4, !tbaa !8 + store i32 8, i32* @deth, align 4, !tbaa !8 + store i32 0, i32* @rlt2, align 4, !tbaa !8 + store i32 0, i32* @rlt1, align 4, !tbaa !8 + store i32 0, i32* @plt2, align 4, !tbaa !8 + store i32 0, i32* @plt1, align 4, !tbaa !8 + store i32 0, i32* @al2, align 4, 
!tbaa !8 + store i32 0, i32* @al1, align 4, !tbaa !8 + store i32 0, i32* @nbl, align 4, !tbaa !8 + store i32 0, i32* @rh2, align 4, !tbaa !8 + store i32 0, i32* @rh1, align 4, !tbaa !8 + store i32 0, i32* @ph2, align 4, !tbaa !8 + store i32 0, i32* @ph1, align 4, !tbaa !8 + store i32 0, i32* @ah2, align 4, !tbaa !8 + store i32 0, i32* @ah1, align 4, !tbaa !8 + store i32 0, i32* @nbh, align 4, !tbaa !8 + store i32 0, i32* @dec_rlt2, align 4, !tbaa !8 + store i32 0, i32* @dec_rlt1, align 4, !tbaa !8 + store i32 0, i32* @dec_plt2, align 4, !tbaa !8 + store i32 0, i32* @dec_plt1, align 4, !tbaa !8 + store i32 0, i32* @dec_al2, align 4, !tbaa !8 + store i32 0, i32* @dec_al1, align 4, !tbaa !8 + store i32 0, i32* @dec_nbl, align 4, !tbaa !8 + store i32 0, i32* @dec_rh2, align 4, !tbaa !8 + store i32 0, i32* @dec_rh1, align 4, !tbaa !8 + store i32 0, i32* @dec_ph2, align 4, !tbaa !8 + store i32 0, i32* @dec_ph1, align 4, !tbaa !8 + store i32 0, i32* @dec_ah2, align 4, !tbaa !8 + store i32 0, i32* @dec_ah1, align 4, !tbaa !8 + store i32 0, i32* @dec_nbh, align 4, !tbaa !8 + call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(24) bitcast ([6 x i32]* @delay_dltx to i8*), i8 0, i64 24, i1 false) + call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(24) bitcast ([6 x i32]* @delay_dhx to i8*), i8 0, i64 24, i1 false) + call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(24) bitcast ([6 x i32]* @dec_del_dltx to i8*), i8 0, i64 24, i1 false) + call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(24) bitcast ([6 x i32]* @dec_del_dhx to i8*), i8 0, i64 24, i1 false) + call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(24) bitcast ([6 x i32]* @delay_bpl to i8*), i8 0, i64 24, i1 false) + call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(24) bitcast ([6 x i32]* @delay_bph to i8*), i8 0, i64 24, i1 false) + call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(24) bitcast ([6 x i32]* @dec_del_bpl to i8*), i8 0, i64 24, i1 false) + call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(24) bitcast ([6 x i32]* @dec_del_bph to i8*), i8 0, i64 24, i1 false) + call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(92) bitcast ([24 x i32]* @tqmf to i8*), i8 0, i64 92, i1 false) + call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(44) bitcast ([11 x i32]* @accumc to i8*), i8 0, i64 44, i1 false) + call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(44) bitcast ([11 x i32]* @accumd to i8*), i8 0, i64 44, i1 false) + ret void +} + +; Function Attrs: nofree nosync nounwind sspstrong uwtable +define dso_local i32 @main() local_unnamed_addr #2 { + store i32 32, i32* @dec_detl, align 4, !tbaa !8 + store i32 32, i32* @detl, align 4, !tbaa !8 + store i32 8, i32* @dec_deth, align 4, !tbaa !8 + store i32 8, i32* @deth, align 4, !tbaa !8 + store i32 0, i32* @rlt2, align 4, !tbaa !8 + store i32 0, i32* @rlt1, align 4, !tbaa !8 + store i32 0, i32* @plt2, align 4, !tbaa !8 + store i32 0, i32* @plt1, align 4, !tbaa !8 + store i32 0, i32* @al2, align 4, !tbaa !8 + store i32 0, i32* @al1, align 4, !tbaa !8 + store i32 0, i32* @nbl, align 4, !tbaa !8 + store i32 0, i32* @rh2, align 4, !tbaa !8 + store i32 0, i32* @rh1, align 4, !tbaa !8 + store i32 0, i32* @ph2, align 4, !tbaa !8 + store i32 0, i32* @ph1, align 4, !tbaa !8 + store i32 0, i32* @ah2, align 4, !tbaa !8 + 
store i32 0, i32* @ah1, align 4, !tbaa !8 + store i32 0, i32* @nbh, align 4, !tbaa !8 + store i32 0, i32* @dec_rlt2, align 4, !tbaa !8 + store i32 0, i32* @dec_rlt1, align 4, !tbaa !8 + store i32 0, i32* @dec_plt2, align 4, !tbaa !8 + store i32 0, i32* @dec_plt1, align 4, !tbaa !8 + store i32 0, i32* @dec_al2, align 4, !tbaa !8 + store i32 0, i32* @dec_al1, align 4, !tbaa !8 + store i32 0, i32* @dec_nbl, align 4, !tbaa !8 + store i32 0, i32* @dec_rh2, align 4, !tbaa !8 + store i32 0, i32* @dec_rh1, align 4, !tbaa !8 + store i32 0, i32* @dec_ph2, align 4, !tbaa !8 + store i32 0, i32* @dec_ph1, align 4, !tbaa !8 + store i32 0, i32* @dec_ah2, align 4, !tbaa !8 + store i32 0, i32* @dec_ah1, align 4, !tbaa !8 + store i32 0, i32* @dec_nbh, align 4, !tbaa !8 + call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(24) bitcast ([6 x i32]* @delay_dltx to i8*), i8 0, i64 24, i1 false) #11 + call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(24) bitcast ([6 x i32]* @delay_dhx to i8*), i8 0, i64 24, i1 false) #11 + call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(24) bitcast ([6 x i32]* @dec_del_dltx to i8*), i8 0, i64 24, i1 false) #11 + call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(24) bitcast ([6 x i32]* @dec_del_dhx to i8*), i8 0, i64 24, i1 false) #11 + call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(24) bitcast ([6 x i32]* @delay_bpl to i8*), i8 0, i64 24, i1 false) #11 + call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(24) bitcast ([6 x i32]* @delay_bph to i8*), i8 0, i64 24, i1 false) #11 + call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(24) bitcast ([6 x i32]* @dec_del_bpl to i8*), i8 0, i64 24, i1 false) #11 + call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(24) bitcast ([6 x i32]* @dec_del_bph to i8*), i8 0, i64 24, i1 false) #11 + call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(92) bitcast ([24 x i32]* @tqmf to i8*), i8 0, i64 92, i1 false) #11 + call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(44) bitcast ([11 x i32]* @accumc to i8*), i8 0, i64 44, i1 false) #11 + call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(44) bitcast ([11 x i32]* @accumd to i8*), i8 0, i64 44, i1 false) #11 + br label %1 + +1: ; preds = %0, %37 + %2 = phi i64 [ 0, %0 ], [ %41, %37 ] + %3 = mul nuw nsw i64 %2, 6282000 + %4 = sub nsw i64 1570, %3 + %5 = trunc i64 %4 to i32 + %6 = call i32 @llvm.smax.i32(i32 %5, i32 -6282) #11 + %7 = trunc i64 %3 to i32 + %8 = add i32 %7, -1570 + %9 = add i32 %8, %6 + %10 = icmp ne i32 %9, 0 + %11 = zext i1 %10 to i32 + %12 = sub i32 %9, %11 + %13 = udiv i32 %12, 6282 + %14 = add nuw nsw i32 %13, %11 + %15 = mul i32 %14, 6282 + %16 = trunc i64 %4 to i32 + %17 = add i32 %15, %16 + %18 = mul i32 %17, %17 + %19 = sub i32 0, %18 + %20 = mul nsw i32 %17, %19 + %21 = sdiv i32 %20, 6 + %22 = add nsw i32 %21, %17 + %23 = add i32 %20, 5 + %24 = icmp ult i32 %23, 11 + br i1 %24, label %37, label %25 + +25: ; preds = %1, %25 + %26 = phi i32 [ %35, %25 ], [ 2, %1 ] + %27 = phi i32 [ %34, %25 ], [ %22, %1 ] + %28 = phi i32 [ %33, %25 ], [ %21, %1 ] + %29 = mul nsw i32 %28, %19 + %30 = shl nuw nsw i32 %26, 1 + %31 = or i32 %30, 1 + %32 = mul nsw i32 %31, %30 + %33 = sdiv i32 %29, %32 + %34 = add nsw i32 %33, %27 + %35 = add nuw nsw i32 %26, 1 + %36 = icmp eq i32 %33, 0 + br i1 %36, label %37, label 
%25, !llvm.loop !5 + +37: ; preds = %25, %1 + %38 = phi i32 [ %22, %1 ], [ %34, %25 ] + %39 = mul nsw i32 %38, 10 + %40 = getelementptr inbounds [6 x i32], [6 x i32]* @main.test_data, i64 0, i64 %2 + store i32 %39, i32* %40, align 4, !tbaa !8 + %41 = add nuw nsw i64 %2, 1 + %42 = icmp eq i64 %41, 3 + br i1 %42, label %43, label %1, !llvm.loop !18 + +43: ; preds = %37, %43 + %44 = phi i64 [ %53, %43 ], [ 0, %37 ] + %45 = getelementptr inbounds [6 x i32], [6 x i32]* @main.test_data, i64 0, i64 %44 + %46 = load i32, i32* %45, align 8, !tbaa !8 + %47 = or i64 %44, 1 + %48 = getelementptr inbounds [6 x i32], [6 x i32]* @main.test_data, i64 0, i64 %47 + %49 = load i32, i32* %48, align 4, !tbaa !8 + %50 = call i32 @encode(i32 %46, i32 %49) + %51 = lshr exact i64 %44, 1 + %52 = getelementptr inbounds [3 x i32], [3 x i32]* @main.compressed, i64 0, i64 %51 + store i32 %50, i32* %52, align 4, !tbaa !8 + %53 = add nuw nsw i64 %44, 2 + %54 = icmp eq i64 %44, 0 + br i1 %54, label %43, label %55, !llvm.loop !19 + +55: ; preds = %43, %55 + %56 = phi i64 [ %65, %55 ], [ 0, %43 ] + %57 = lshr exact i64 %56, 1 + %58 = getelementptr inbounds [3 x i32], [3 x i32]* @main.compressed, i64 0, i64 %57 + %59 = load i32, i32* %58, align 4, !tbaa !8 + call void @decode(i32 %59) + %60 = load i32, i32* @xout1, align 4, !tbaa !8 + %61 = getelementptr inbounds [6 x i32], [6 x i32]* @main.result, i64 0, i64 %56 + store i32 %60, i32* %61, align 8, !tbaa !8 + %62 = load i32, i32* @xout2, align 4, !tbaa !8 + %63 = or i64 %56, 1 + %64 = getelementptr inbounds [6 x i32], [6 x i32]* @main.result, i64 0, i64 %63 + store i32 %62, i32* %64, align 4, !tbaa !8 + %65 = add nuw nsw i64 %56, 2 + %66 = icmp eq i64 %56, 0 + br i1 %66, label %55, label %67, !llvm.loop !20 + +67: ; preds = %55 + %68 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @main.result, i64 0, i64 4), align 16, !tbaa !8 + %69 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @main.result, i64 0, i64 5), align 4, !tbaa !8 + %70 = add nsw i32 %69, %68 + ret i32 %70 +} + +; Function Attrs: nofree nosync nounwind readnone speculatable willreturn +declare i32 @llvm.abs.i32(i32, i1 immarg) #8 + +; Function Attrs: nofree nosync nounwind readnone speculatable willreturn +declare i32 @llvm.smin.i32(i32, i32) #8 + +; Function Attrs: nofree nosync nounwind readnone speculatable willreturn +declare i32 @llvm.smax.i32(i32, i32) #8 + +; Function Attrs: argmemonly nofree nounwind willreturn +declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1 immarg) #9 + +; Function Attrs: argmemonly nofree nounwind willreturn writeonly +declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg) #10 + +attributes #0 = { mustprogress nofree norecurse nosync nounwind readnone sspstrong uwtable willreturn "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #1 = { nofree nosync nounwind readnone sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #2 = { nofree nosync nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" 
"target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #3 = { nofree norecurse nosync nounwind readonly sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #4 = { nofree nosync nounwind readonly sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #5 = { mustprogress nofree norecurse nosync nounwind readonly sspstrong uwtable willreturn "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #6 = { nofree norecurse nosync nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #7 = { nofree norecurse nosync nounwind sspstrong uwtable writeonly "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #8 = { nofree nosync nounwind readnone speculatable willreturn } +attributes #9 = { argmemonly nofree nounwind willreturn } +attributes #10 = { argmemonly nofree nounwind willreturn writeonly } +attributes #11 = { nounwind } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = distinct !{!5, !6, !7} +!6 = !{!"llvm.loop.mustprogress"} +!7 = !{!"llvm.loop.unroll.disable"} +!8 = !{!9, !9, i64 0} +!9 = !{!"int", !10, i64 0} +!10 = !{!"omnipotent char", !11, i64 0} +!11 = !{!"Simple C/C++ TBAA"} +!12 = distinct !{!12, !6, !7} +!13 = distinct !{!13, !6, !7} +!14 = distinct !{!14, !6, !7} +!15 = distinct !{!15, !6, !7} +!16 = distinct !{!16, !6, !7} +!17 = distinct !{!17, !6, !7} +!18 = distinct !{!18, !6, !7} +!19 = distinct !{!19, !6, !7} +!20 = distinct !{!20, !6, !7} diff --git a/test/bs.ll b/test/bs.ll new file mode 100644 index 0000000..c231344 --- /dev/null +++ b/test/bs.ll @@ -0,0 +1,78 @@ +; ModuleID = 'bs.c' +source_filename = "bs.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +%struct.DATA = type { i32, i32 } + +@data = dso_local local_unnamed_addr global [15 x %struct.DATA] [%struct.DATA { i32 1, i32 100 }, %struct.DATA { i32 5, i32 200 }, %struct.DATA { i32 6, i32 300 }, %struct.DATA { i32 7, i32 700 }, %struct.DATA { i32 8, i32 900 }, %struct.DATA { i32 9, i32 250 }, %struct.DATA { i32 10, i32 400 }, %struct.DATA { i32 11, i32 600 }, %struct.DATA { i32 12, i32 800 }, %struct.DATA { i32 13, i32 1500 }, %struct.DATA { i32 14, i32 1200 }, %struct.DATA { i32 15, i32 110 }, %struct.DATA { i32 16, i32 140 }, %struct.DATA { i32 17, i32 133 }, %struct.DATA { i32 18, i32 10 }], align 16 + +; Function Attrs: nofree norecurse nosync nounwind readonly sspstrong uwtable +define dso_local i32 @main() 
local_unnamed_addr #0 { + ret i32 0 +} + +; Function Attrs: nofree norecurse nosync nounwind readonly sspstrong uwtable +define dso_local i32 @binary_search(i32 %0) local_unnamed_addr #0 { + br label %2 + +2: ; preds = %1, %22 + %3 = phi i32 [ 0, %1 ], [ %25, %22 ] + %4 = phi i32 [ 14, %1 ], [ %24, %22 ] + %5 = phi i32 [ -1, %1 ], [ %23, %22 ] + %6 = add nsw i32 %3, %4 + %7 = ashr i32 %6, 1 + %8 = sext i32 %7 to i64 + %9 = getelementptr inbounds [15 x %struct.DATA], [15 x %struct.DATA]* @data, i64 0, i64 %8, i32 0 + %10 = load i32, i32* %9, align 8, !tbaa !5 + %11 = icmp eq i32 %10, %0 + br i1 %11, label %12, label %16 + +12: ; preds = %2 + %13 = add nsw i32 %3, -1 + %14 = getelementptr inbounds [15 x %struct.DATA], [15 x %struct.DATA]* @data, i64 0, i64 %8, i32 1 + %15 = load i32, i32* %14, align 4, !tbaa !10 + br label %22 + +16: ; preds = %2 + %17 = icmp sgt i32 %10, %0 + br i1 %17, label %18, label %20 + +18: ; preds = %16 + %19 = add nsw i32 %7, -1 + br label %22 + +20: ; preds = %16 + %21 = add nsw i32 %7, 1 + br label %22 + +22: ; preds = %18, %20, %12 + %23 = phi i32 [ %15, %12 ], [ %5, %18 ], [ %5, %20 ] + %24 = phi i32 [ %13, %12 ], [ %19, %18 ], [ %4, %20 ] + %25 = phi i32 [ %3, %12 ], [ %3, %18 ], [ %21, %20 ] + %26 = icmp sgt i32 %25, %24 + br i1 %26, label %27, label %2, !llvm.loop !11 + +27: ; preds = %22 + ret i32 %23 +} + +attributes #0 = { nofree norecurse nosync nounwind readonly sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = !{!6, !7, i64 0} +!6 = !{!"DATA", !7, i64 0, !7, i64 4} +!7 = !{!"int", !8, i64 0} +!8 = !{!"omnipotent char", !9, i64 0} +!9 = !{!"Simple C/C++ TBAA"} +!10 = !{!6, !7, i64 4} +!11 = distinct !{!11, !12, !13} +!12 = !{!"llvm.loop.mustprogress"} +!13 = !{!"llvm.loop.unroll.disable"} diff --git a/test/bsort100.ll b/test/bsort100.ll new file mode 100644 index 0000000..0304c42 --- /dev/null +++ b/test/bsort100.ll @@ -0,0 +1,159 @@ +; ModuleID = 'bsort100.c' +source_filename = "bsort100.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +@Array = dso_local local_unnamed_addr global [101 x i32] zeroinitializer, align 16 +@factor = dso_local local_unnamed_addr global i32 0, align 4 +@Seed = dso_local local_unnamed_addr global i32 0, align 4 + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local i32 @main() local_unnamed_addr #0 { + store i32 -1, i32* @factor, align 4, !tbaa !5 + br label %1 + +1: ; preds = %1, %0 + %2 = phi i64 [ 1, %0 ], [ %9, %1 ] + %3 = load i8, i8* inttoptr (i64 2149580801 to i8*), align 1, !tbaa !9 + %4 = sext i8 %3 to i32 + %5 = trunc i64 %2 to i32 + %6 = mul i32 %5, %4 + %7 = sub i32 0, %6 + %8 = getelementptr inbounds [101 x i32], [101 x i32]* @Array, i64 0, i64 %2 + store i32 %7, i32* %8, align 4, !tbaa !5 + %9 = add nuw nsw i64 %2, 1 + %10 = icmp eq i64 %9, 101 + br i1 %10, label %15, label %1, !llvm.loop !10 + +11: ; preds = %31 + %12 = add nuw nsw i32 %17, 1 + %13 = add nsw i64 %16, -1 + %14 = icmp eq i32 %12, 100 + br i1 %14, label %33, label %15, !llvm.loop !13 + +15: ; preds = %1, %11 + 
%16 = phi i64 [ %13, %11 ], [ 100, %1 ] + %17 = phi i32 [ %12, %11 ], [ 1, %1 ] + br label %18 + +18: ; preds = %28, %15 + %19 = phi i64 [ 1, %15 ], [ %23, %28 ] + %20 = phi i32 [ 1, %15 ], [ %29, %28 ] + %21 = getelementptr inbounds [101 x i32], [101 x i32]* @Array, i64 0, i64 %19 + %22 = load i32, i32* %21, align 4, !tbaa !5 + %23 = add nuw nsw i64 %19, 1 + %24 = getelementptr inbounds [101 x i32], [101 x i32]* @Array, i64 0, i64 %23 + %25 = load i32, i32* %24, align 4, !tbaa !5 + %26 = icmp sgt i32 %22, %25 + br i1 %26, label %27, label %28 + +27: ; preds = %18 + store i32 %25, i32* %21, align 4, !tbaa !5 + store i32 %22, i32* %24, align 4, !tbaa !5 + br label %28 + +28: ; preds = %27, %18 + %29 = phi i32 [ 0, %27 ], [ %20, %18 ] + %30 = icmp eq i64 %23, %16 + br i1 %30, label %31, label %18, !llvm.loop !14 + +31: ; preds = %28 + %32 = icmp eq i32 %29, 0 + br i1 %32, label %11, label %33 + +33: ; preds = %11, %31 + ret i32 0 +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone sspstrong uwtable willreturn +define dso_local i32 @ttime() local_unnamed_addr #1 { + ret i32 0 +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local i32 @Initialize(i32* nocapture %0) local_unnamed_addr #0 { + store i32 -1, i32* @factor, align 4, !tbaa !5 + br label %2 + +2: ; preds = %1, %2 + %3 = phi i64 [ 1, %1 ], [ %10, %2 ] + %4 = load i8, i8* inttoptr (i64 2149580801 to i8*), align 1, !tbaa !9 + %5 = sext i8 %4 to i32 + %6 = trunc i64 %3 to i32 + %7 = mul i32 %6, %5 + %8 = sub i32 0, %7 + %9 = getelementptr inbounds i32, i32* %0, i64 %3 + store i32 %8, i32* %9, align 4, !tbaa !5 + %10 = add nuw nsw i64 %3, 1 + %11 = icmp eq i64 %10, 101 + br i1 %11, label %12, label %2, !llvm.loop !10 + +12: ; preds = %2 + ret i32 undef +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local i32 @BubbleSort(i32* nocapture %0) local_unnamed_addr #0 { + br label %6 + +2: ; preds = %22 + %3 = add nuw nsw i32 %8, 1 + %4 = add nsw i64 %7, -1 + %5 = icmp eq i32 %3, 100 + br i1 %5, label %24, label %6, !llvm.loop !13 + +6: ; preds = %1, %2 + %7 = phi i64 [ 100, %1 ], [ %4, %2 ] + %8 = phi i32 [ 1, %1 ], [ %3, %2 ] + br label %9 + +9: ; preds = %6, %19 + %10 = phi i64 [ 1, %6 ], [ %14, %19 ] + %11 = phi i32 [ 1, %6 ], [ %20, %19 ] + %12 = getelementptr inbounds i32, i32* %0, i64 %10 + %13 = load i32, i32* %12, align 4, !tbaa !5 + %14 = add nuw nsw i64 %10, 1 + %15 = getelementptr inbounds i32, i32* %0, i64 %14 + %16 = load i32, i32* %15, align 4, !tbaa !5 + %17 = icmp sgt i32 %13, %16 + br i1 %17, label %18, label %19 + +18: ; preds = %9 + store i32 %16, i32* %12, align 4, !tbaa !5 + store i32 %13, i32* %15, align 4, !tbaa !5 + br label %19 + +19: ; preds = %9, %18 + %20 = phi i32 [ 0, %18 ], [ %11, %9 ] + %21 = icmp eq i64 %14, %7 + br i1 %21, label %22, label %9, !llvm.loop !14 + +22: ; preds = %19 + %23 = icmp eq i32 %20, 0 + br i1 %23, label %2, label %24 + +24: ; preds = %22, %2 + ret i32 undef +} + +attributes #0 = { nofree norecurse nosync nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #1 = { mustprogress nofree norecurse nosync nounwind readnone sspstrong uwtable willreturn "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" 
"target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = !{!6, !6, i64 0} +!6 = !{!"int", !7, i64 0} +!7 = !{!"omnipotent char", !8, i64 0} +!8 = !{!"Simple C/C++ TBAA"} +!9 = !{!7, !7, i64 0} +!10 = distinct !{!10, !11, !12} +!11 = !{!"llvm.loop.mustprogress"} +!12 = !{!"llvm.loop.unroll.disable"} +!13 = distinct !{!13, !11, !12} +!14 = distinct !{!14, !11, !12} diff --git a/test/cnt.ll b/test/cnt.ll new file mode 100644 index 0000000..03aa425 --- /dev/null +++ b/test/cnt.ll @@ -0,0 +1,294 @@ +; ModuleID = 'cnt.c' +source_filename = "cnt.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +@Array = dso_local local_unnamed_addr global [10 x [10 x i32]] zeroinitializer, align 16 +@Seed = dso_local local_unnamed_addr global i32 0, align 4 +@Postotal = dso_local local_unnamed_addr global i32 0, align 4 +@Poscnt = dso_local local_unnamed_addr global i32 0, align 4 +@Negtotal = dso_local local_unnamed_addr global i32 0, align 4 +@Negcnt = dso_local local_unnamed_addr global i32 0, align 4 + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local i32 @main() local_unnamed_addr #0 { + store i32 0, i32* @Seed, align 4, !tbaa !5 + br label %1 + +1: ; preds = %13, %0 + %2 = phi i32 [ 0, %0 ], [ %9, %13 ] + %3 = phi i64 [ 0, %0 ], [ %14, %13 ] + br label %4 + +4: ; preds = %4, %1 + %5 = phi i32 [ %2, %1 ], [ %9, %4 ] + %6 = phi i64 [ 0, %1 ], [ %11, %4 ] + %7 = mul nsw i32 %5, 133 + %8 = add nsw i32 %7, 81 + %9 = srem i32 %8, 8095 + %10 = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @Array, i64 0, i64 %3, i64 %6 + store i32 %9, i32* %10, align 4, !tbaa !5 + %11 = add nuw nsw i64 %6, 1 + %12 = icmp eq i64 %11, 10 + br i1 %12, label %13, label %4, !llvm.loop !9 + +13: ; preds = %4 + %14 = add nuw nsw i64 %3, 1 + %15 = icmp eq i64 %14, 10 + br i1 %15, label %16, label %1, !llvm.loop !12 + +16: ; preds = %13 + store i32 %9, i32* @Seed, align 4, !tbaa !5 + br label %17 + +17: ; preds = %16, %45 + %18 = phi i64 [ %46, %45 ], [ 0, %16 ] + %19 = phi i32 [ %42, %45 ], [ 0, %16 ] + %20 = phi i32 [ %41, %45 ], [ 0, %16 ] + %21 = phi i32 [ %40, %45 ], [ 0, %16 ] + %22 = phi i32 [ %39, %45 ], [ 0, %16 ] + br label %23 + +23: ; preds = %38, %17 + %24 = phi i64 [ 0, %17 ], [ %43, %38 ] + %25 = phi i32 [ %19, %17 ], [ %42, %38 ] + %26 = phi i32 [ %20, %17 ], [ %41, %38 ] + %27 = phi i32 [ %21, %17 ], [ %40, %38 ] + %28 = phi i32 [ %22, %17 ], [ %39, %38 ] + %29 = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @Array, i64 0, i64 %18, i64 %24 + %30 = load i32, i32* %29, align 4, !tbaa !5 + %31 = icmp slt i32 %30, 0 + br i1 %31, label %32, label %35 + +32: ; preds = %23 + %33 = add nsw i32 %30, %28 + %34 = add nsw i32 %26, 1 + br label %38 + +35: ; preds = %23 + %36 = add nsw i32 %30, %27 + %37 = add nsw i32 %25, 1 + br label %38 + +38: ; preds = %35, %32 + %39 = phi i32 [ %33, %32 ], [ %28, %35 ] + %40 = phi i32 [ %27, %32 ], [ %36, %35 ] + %41 = phi i32 [ %34, %32 ], [ %26, %35 ] + %42 = phi i32 [ %25, %32 ], [ %37, %35 ] + %43 = add nuw nsw i64 %24, 1 + %44 = icmp eq i64 %43, 10 + br i1 %44, label %45, label %23, !llvm.loop !13 + +45: ; preds = %38 + %46 = add nuw nsw i64 %18, 1 + %47 = icmp eq i64 %46, 10 + br 
i1 %47, label %48, label %17, !llvm.loop !14 + +48: ; preds = %45 + store i32 %39, i32* @Postotal, align 4, !tbaa !5 + store i32 %41, i32* @Poscnt, align 4, !tbaa !5 + store i32 %40, i32* @Negtotal, align 4, !tbaa !5 + store i32 %42, i32* @Negcnt, align 4, !tbaa !5 + ret i32 1 +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind sspstrong uwtable willreturn writeonly +define dso_local i32 @InitSeed() local_unnamed_addr #1 { + store i32 0, i32* @Seed, align 4, !tbaa !5 + ret i32 0 +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local i32 @Test([10 x i32]* nocapture %0) local_unnamed_addr #0 { + br label %2 + +2: ; preds = %13, %1 + %3 = phi i64 [ 0, %1 ], [ %14, %13 ] + br label %4 + +4: ; preds = %4, %2 + %5 = phi i64 [ 0, %2 ], [ %11, %4 ] + %6 = load i32, i32* @Seed, align 4, !tbaa !5 + %7 = mul nsw i32 %6, 133 + %8 = add nsw i32 %7, 81 + %9 = srem i32 %8, 8095 + store i32 %9, i32* @Seed, align 4, !tbaa !5 + %10 = getelementptr inbounds [10 x i32], [10 x i32]* %0, i64 %3, i64 %5 + store i32 %9, i32* %10, align 4, !tbaa !5 + %11 = add nuw nsw i64 %5, 1 + %12 = icmp eq i64 %11, 10 + br i1 %12, label %13, label %4, !llvm.loop !9 + +13: ; preds = %4 + %14 = add nuw nsw i64 %3, 1 + %15 = icmp eq i64 %14, 10 + br i1 %15, label %16, label %2, !llvm.loop !12 + +16: ; preds = %13, %44 + %17 = phi i64 [ %45, %44 ], [ 0, %13 ] + %18 = phi i32 [ %41, %44 ], [ 0, %13 ] + %19 = phi i32 [ %40, %44 ], [ 0, %13 ] + %20 = phi i32 [ %39, %44 ], [ 0, %13 ] + %21 = phi i32 [ %38, %44 ], [ 0, %13 ] + br label %22 + +22: ; preds = %37, %16 + %23 = phi i64 [ 0, %16 ], [ %42, %37 ] + %24 = phi i32 [ %18, %16 ], [ %41, %37 ] + %25 = phi i32 [ %19, %16 ], [ %40, %37 ] + %26 = phi i32 [ %20, %16 ], [ %39, %37 ] + %27 = phi i32 [ %21, %16 ], [ %38, %37 ] + %28 = getelementptr inbounds [10 x i32], [10 x i32]* %0, i64 %17, i64 %23 + %29 = load i32, i32* %28, align 4, !tbaa !5 + %30 = icmp slt i32 %29, 0 + br i1 %30, label %31, label %34 + +31: ; preds = %22 + %32 = add nsw i32 %29, %27 + %33 = add nsw i32 %25, 1 + br label %37 + +34: ; preds = %22 + %35 = add nsw i32 %29, %26 + %36 = add nsw i32 %24, 1 + br label %37 + +37: ; preds = %34, %31 + %38 = phi i32 [ %32, %31 ], [ %27, %34 ] + %39 = phi i32 [ %26, %31 ], [ %35, %34 ] + %40 = phi i32 [ %33, %31 ], [ %25, %34 ] + %41 = phi i32 [ %24, %31 ], [ %36, %34 ] + %42 = add nuw nsw i64 %23, 1 + %43 = icmp eq i64 %42, 10 + br i1 %43, label %44, label %22, !llvm.loop !13 + +44: ; preds = %37 + %45 = add nuw nsw i64 %17, 1 + %46 = icmp eq i64 %45, 10 + br i1 %46, label %47, label %16, !llvm.loop !14 + +47: ; preds = %44 + store i32 %38, i32* @Postotal, align 4, !tbaa !5 + store i32 %40, i32* @Poscnt, align 4, !tbaa !5 + store i32 %39, i32* @Negtotal, align 4, !tbaa !5 + store i32 %41, i32* @Negcnt, align 4, !tbaa !5 + ret i32 0 +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local i32 @Initialize([10 x i32]* nocapture %0) local_unnamed_addr #0 { + br label %2 + +2: ; preds = %1, %13 + %3 = phi i64 [ 0, %1 ], [ %14, %13 ] + br label %4 + +4: ; preds = %2, %4 + %5 = phi i64 [ 0, %2 ], [ %11, %4 ] + %6 = load i32, i32* @Seed, align 4, !tbaa !5 + %7 = mul nsw i32 %6, 133 + %8 = add nsw i32 %7, 81 + %9 = srem i32 %8, 8095 + store i32 %9, i32* @Seed, align 4, !tbaa !5 + %10 = getelementptr inbounds [10 x i32], [10 x i32]* %0, i64 %3, i64 %5 + store i32 %9, i32* %10, align 4, !tbaa !5 + %11 = add nuw nsw i64 %5, 1 + %12 = icmp eq i64 %11, 10 + br i1 %12, label %13, label %4, !llvm.loop 
!9 + +13: ; preds = %4 + %14 = add nuw nsw i64 %3, 1 + %15 = icmp eq i64 %14, 10 + br i1 %15, label %16, label %2, !llvm.loop !12 + +16: ; preds = %13 + ret i32 0 +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local void @Sum([10 x i32]* nocapture readonly %0) local_unnamed_addr #0 { + br label %2 + +2: ; preds = %1, %30 + %3 = phi i64 [ 0, %1 ], [ %31, %30 ] + %4 = phi i32 [ 0, %1 ], [ %27, %30 ] + %5 = phi i32 [ 0, %1 ], [ %26, %30 ] + %6 = phi i32 [ 0, %1 ], [ %25, %30 ] + %7 = phi i32 [ 0, %1 ], [ %24, %30 ] + br label %8 + +8: ; preds = %2, %23 + %9 = phi i64 [ 0, %2 ], [ %28, %23 ] + %10 = phi i32 [ %4, %2 ], [ %27, %23 ] + %11 = phi i32 [ %5, %2 ], [ %26, %23 ] + %12 = phi i32 [ %6, %2 ], [ %25, %23 ] + %13 = phi i32 [ %7, %2 ], [ %24, %23 ] + %14 = getelementptr inbounds [10 x i32], [10 x i32]* %0, i64 %3, i64 %9 + %15 = load i32, i32* %14, align 4, !tbaa !5 + %16 = icmp slt i32 %15, 0 + br i1 %16, label %17, label %20 + +17: ; preds = %8 + %18 = add nsw i32 %15, %13 + %19 = add nsw i32 %11, 1 + br label %23 + +20: ; preds = %8 + %21 = add nsw i32 %15, %12 + %22 = add nsw i32 %10, 1 + br label %23 + +23: ; preds = %17, %20 + %24 = phi i32 [ %18, %17 ], [ %13, %20 ] + %25 = phi i32 [ %12, %17 ], [ %21, %20 ] + %26 = phi i32 [ %19, %17 ], [ %11, %20 ] + %27 = phi i32 [ %10, %17 ], [ %22, %20 ] + %28 = add nuw nsw i64 %9, 1 + %29 = icmp eq i64 %28, 10 + br i1 %29, label %30, label %8, !llvm.loop !13 + +30: ; preds = %23 + %31 = add nuw nsw i64 %3, 1 + %32 = icmp eq i64 %31, 10 + br i1 %32, label %33, label %2, !llvm.loop !14 + +33: ; preds = %30 + store i32 %24, i32* @Postotal, align 4, !tbaa !5 + store i32 %26, i32* @Poscnt, align 4, !tbaa !5 + store i32 %25, i32* @Negtotal, align 4, !tbaa !5 + store i32 %27, i32* @Negcnt, align 4, !tbaa !5 + ret void +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind sspstrong uwtable willreturn +define dso_local i32 @RandomInteger() local_unnamed_addr #2 { + %1 = load i32, i32* @Seed, align 4, !tbaa !5 + %2 = mul nsw i32 %1, 133 + %3 = add nsw i32 %2, 81 + %4 = srem i32 %3, 8095 + store i32 %4, i32* @Seed, align 4, !tbaa !5 + ret i32 %4 +} + +attributes #0 = { nofree norecurse nosync nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #1 = { mustprogress nofree norecurse nosync nounwind sspstrong uwtable willreturn writeonly "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #2 = { mustprogress nofree norecurse nosync nounwind sspstrong uwtable willreturn "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = !{!6, !6, i64 0} +!6 = !{!"int", !7, i64 0} +!7 = !{!"omnipotent char", !8, i64 0} +!8 = !{!"Simple C/C++ TBAA"} +!9 = distinct !{!9, !10, !11} +!10 = !{!"llvm.loop.mustprogress"} +!11 = !{!"llvm.loop.unroll.disable"} +!12 = distinct 
!{!12, !10, !11} +!13 = distinct !{!13, !10, !11} +!14 = distinct !{!14, !10, !11} diff --git a/test/compress.ll b/test/compress.ll new file mode 100644 index 0000000..d541061 --- /dev/null +++ b/test/compress.ll @@ -0,0 +1,798 @@ +; ModuleID = 'compress.c' +source_filename = "compress.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +@maxbits = dso_local local_unnamed_addr global i32 16, align 4 +@maxmaxcode = dso_local local_unnamed_addr global i64 65536, align 8 +@hsize = dso_local local_unnamed_addr global i64 257, align 8 +@free_ent = dso_local local_unnamed_addr global i64 0, align 8 +@exit_stat = dso_local local_unnamed_addr global i32 0, align 4 +@nomagic = dso_local local_unnamed_addr global i32 1, align 4 +@zcat_flg = dso_local local_unnamed_addr global i32 0, align 4 +@quiet = dso_local local_unnamed_addr global i32 1, align 4 +@block_compress = dso_local local_unnamed_addr global i32 128, align 4 +@clear_flg = dso_local local_unnamed_addr global i32 0, align 4 +@ratio = dso_local local_unnamed_addr global i64 0, align 8 +@checkpoint = dso_local local_unnamed_addr global i64 10000, align 8 +@force = dso_local local_unnamed_addr global i32 0, align 4 +@InCnt = dso_local local_unnamed_addr global i32 0, align 4 +@apsim_InCnt = dso_local local_unnamed_addr global i32 0, align 4 +@orig_text_buffer = dso_local global [50 x i8] zeroinitializer, align 16 +@InBuff = dso_local local_unnamed_addr global i8* null, align 8 +@comp_text_buffer = dso_local global [55 x i8] zeroinitializer, align 16 +@OutBuff = dso_local local_unnamed_addr global i8* null, align 8 +@in_count = dso_local local_unnamed_addr global i64 1, align 8 +@out_count = dso_local local_unnamed_addr global i64 0, align 8 +@offset = internal unnamed_addr global i32 0, align 4 +@bytes_out = dso_local local_unnamed_addr global i64 0, align 8 +@n_bits = dso_local local_unnamed_addr global i32 0, align 4 +@maxcode = dso_local local_unnamed_addr global i64 0, align 8 +@htab = dso_local local_unnamed_addr global [257 x i64] zeroinitializer, align 16 +@codetab = dso_local local_unnamed_addr global [257 x i16] zeroinitializer, align 16 +@lmask = dso_local local_unnamed_addr global [9 x i8] c"\FF\FE\FC\F8\F0\E0\C0\80\00", align 1 +@rmask = dso_local local_unnamed_addr global [9 x i8] c"\00\01\03\07\0F\1F?\7F\FF", align 1 +@buf = dso_local global [16 x i8] zeroinitializer, align 16 +@fsize = dso_local local_unnamed_addr global i64 0, align 8 +@ofname = dso_local local_unnamed_addr global [100 x i8] zeroinitializer, align 16 + +; Function Attrs: nofree nosync nounwind sspstrong uwtable +define dso_local i32 @main() local_unnamed_addr #0 { + br label %1 + +1: ; preds = %1, %0 + %2 = phi i64 [ 0, %0 ], [ %9, %1 ] + %3 = phi i32 [ 1, %0 ], [ %6, %1 ] + %4 = mul nsw i32 %3, 133 + %5 = add nsw i32 %4, 81 + %6 = srem i32 %5, 8095 + %7 = trunc i32 %6 to i8 + %8 = getelementptr inbounds [50 x i8], [50 x i8]* @orig_text_buffer, i64 0, i64 %2 + store i8 %7, i8* %8, align 1, !tbaa !5 + %9 = add nuw nsw i64 %2, 1 + %10 = icmp eq i64 %9, 50 + br i1 %10, label %11, label %1, !llvm.loop !8 + +11: ; preds = %1 + store i32 16, i32* @maxbits, align 4, !tbaa !11 + store i64 65536, i64* @maxmaxcode, align 8, !tbaa !13 + store i32 50, i32* @InCnt, align 4, !tbaa !11 + store i32 53, i32* @apsim_InCnt, align 4, !tbaa !11 + store i8* getelementptr inbounds ([50 x i8], [50 x i8]* @orig_text_buffer, i64 0, i64 0), i8** @InBuff, align 8, !tbaa !15 + store i8* getelementptr 
inbounds ([55 x i8], [55 x i8]* @comp_text_buffer, i64 0, i64 0), i8** @OutBuff, align 8, !tbaa !15 + call void @compress() + ret i32 0 +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable writeonly +define dso_local void @initbuffer() local_unnamed_addr #1 { + br label %1 + +1: ; preds = %0, %1 + %2 = phi i64 [ 0, %0 ], [ %9, %1 ] + %3 = phi i32 [ 1, %0 ], [ %6, %1 ] + %4 = mul nsw i32 %3, 133 + %5 = add nsw i32 %4, 81 + %6 = srem i32 %5, 8095 + %7 = trunc i32 %6 to i8 + %8 = getelementptr inbounds [50 x i8], [50 x i8]* @orig_text_buffer, i64 0, i64 %2 + store i8 %7, i8* %8, align 1, !tbaa !5 + %9 = add nuw nsw i64 %2, 1 + %10 = icmp eq i64 %9, 50 + br i1 %10, label %11, label %1, !llvm.loop !8 + +11: ; preds = %1 + ret void +} + +; Function Attrs: nofree nosync nounwind sspstrong uwtable +define dso_local void @compress() local_unnamed_addr #0 { + store i32 0, i32* @offset, align 4, !tbaa !11 + store i64 3, i64* @bytes_out, align 8, !tbaa !13 + store i64 0, i64* @out_count, align 8, !tbaa !13 + store i32 0, i32* @clear_flg, align 4, !tbaa !11 + store i64 0, i64* @ratio, align 8, !tbaa !13 + store i64 1, i64* @in_count, align 8, !tbaa !13 + store i64 10000, i64* @checkpoint, align 8, !tbaa !13 + store i32 9, i32* @n_bits, align 4, !tbaa !11 + store i64 511, i64* @maxcode, align 8, !tbaa !13 + %1 = load i32, i32* @block_compress, align 4, !tbaa !11 + %2 = icmp eq i32 %1, 0 + %3 = select i1 %2, i64 256, i64 257 + store i64 %3, i64* @free_ent, align 8, !tbaa !13 + %4 = load i32, i32* @InCnt, align 4, !tbaa !11 + %5 = icmp sgt i32 %4, 0 + br i1 %5, label %6, label %16 + +6: ; preds = %0 + %7 = load i32, i32* @apsim_InCnt, align 4, !tbaa !11 + %8 = add nsw i32 %7, -1 + store i32 %8, i32* @apsim_InCnt, align 4, !tbaa !11 + %9 = icmp sgt i32 %7, 0 + br i1 %9, label %10, label %16 + +10: ; preds = %6 + %11 = add nsw i32 %4, -1 + store i32 %11, i32* @InCnt, align 4, !tbaa !11 + %12 = load i8*, i8** @InBuff, align 8, !tbaa !15 + %13 = getelementptr inbounds i8, i8* %12, i64 1 + store i8* %13, i8** @InBuff, align 8, !tbaa !15 + %14 = load i8, i8* %12, align 1, !tbaa !5 + %15 = zext i8 %14 to i64 + br label %16 + +16: ; preds = %0, %6, %10 + %17 = phi i64 [ %15, %10 ], [ 4294967295, %6 ], [ 4294967295, %0 ] + %18 = load i64, i64* @hsize, align 8, !tbaa !13 + %19 = icmp slt i64 %18, 65536 + br i1 %19, label %20, label %28 + +20: ; preds = %16, %20 + %21 = phi i32 [ %23, %20 ], [ 0, %16 ] + %22 = phi i64 [ %24, %20 ], [ %18, %16 ] + %23 = add nuw nsw i32 %21, 1 + %24 = shl nsw i64 %22, 1 + %25 = icmp slt i64 %22, 32768 + br i1 %25, label %20, label %26, !llvm.loop !17 + +26: ; preds = %20 + %27 = sub nsw i32 7, %21 + br label %28 + +28: ; preds = %26, %16 + %29 = phi i32 [ 8, %16 ], [ %27, %26 ] + %30 = add i64 %18, -16 + %31 = add i64 %18, 15 + %32 = call i64 @llvm.smin.i64(i64 %18, i64 31) #6 + %33 = sub i64 %31, %32 + %34 = and i64 %33, -16 + %35 = sub i64 %30, %34 + %36 = getelementptr [257 x i64], [257 x i64]* @htab, i64 0, i64 %35 + %37 = bitcast i64* %36 to i8* + %38 = shl nuw i64 %33, 3 + %39 = add i64 %38, 128 + %40 = and i64 %39, -128 + call void @llvm.memset.p0i8.i64(i8* align 8 %37, i8 -1, i64 %40, i1 false) #6 + %41 = sub i64 %18, %34 + %42 = icmp sgt i64 %41, 16 + br i1 %42, label %43, label %52 + +43: ; preds = %28 + %44 = call i64 @llvm.smin.i64(i64 %35, i64 1) #6 + %45 = add i64 %44, -1 + %46 = getelementptr [257 x i64], [257 x i64]* @htab, i64 0, i64 %45 + %47 = bitcast i64* %46 to i8* + %48 = add i64 %18, -15 + %49 = add i64 %34, %44 + %50 = sub i64 %48, %49 + 
%51 = shl nuw i64 %50, 3 + call void @llvm.memset.p0i8.i64(i8* align 8 %47, i8 -1, i64 %51, i1 false) #6 + br label %52 + +52: ; preds = %28, %43 + %53 = load i32, i32* @InCnt, align 4, !tbaa !11 + %54 = icmp sgt i32 %53, 0 + br i1 %54, label %55, label %178 + +55: ; preds = %52, %174 + %56 = phi i32 [ %176, %174 ], [ %53, %52 ] + %57 = phi i64 [ %175, %174 ], [ %17, %52 ] + %58 = load i32, i32* @apsim_InCnt, align 4, !tbaa !11 + %59 = add nsw i32 %58, -1 + store i32 %59, i32* @apsim_InCnt, align 4, !tbaa !11 + %60 = icmp sgt i32 %58, 0 + br i1 %60, label %61, label %67 + +61: ; preds = %55 + %62 = add nsw i32 %56, -1 + store i32 %62, i32* @InCnt, align 4, !tbaa !11 + %63 = load i8*, i8** @InBuff, align 8, !tbaa !15 + %64 = getelementptr inbounds i8, i8* %63, i64 1 + store i8* %64, i8** @InBuff, align 8, !tbaa !15 + %65 = load i8, i8* %63, align 1, !tbaa !5 + %66 = zext i8 %65 to i32 + br label %67 + +67: ; preds = %55, %61 + %68 = phi i32 [ %66, %61 ], [ -1, %55 ] + %69 = load i64, i64* @in_count, align 8, !tbaa !13 + %70 = add nsw i64 %69, 1 + store i64 %70, i64* @in_count, align 8, !tbaa !13 + %71 = sext i32 %68 to i64 + %72 = load i32, i32* @maxbits, align 4, !tbaa !11 + %73 = zext i32 %72 to i64 + %74 = shl i64 %71, %73 + %75 = add nsw i64 %74, %57 + %76 = shl i32 %68, %29 + %77 = sext i32 %76 to i64 + %78 = xor i64 %57, %77 + %79 = getelementptr inbounds [257 x i64], [257 x i64]* @htab, i64 0, i64 %78 + %80 = load i64, i64* %79, align 8, !tbaa !13 + %81 = icmp eq i64 %80, %75 + br i1 %81, label %82, label %86 + +82: ; preds = %67 + %83 = getelementptr inbounds [257 x i16], [257 x i16]* @codetab, i64 0, i64 %78 + %84 = load i16, i16* %83, align 2, !tbaa !18 + %85 = zext i16 %84 to i64 + br label %174, !llvm.loop !20 + +86: ; preds = %67 + %87 = icmp slt i64 %80, 0 + br i1 %87, label %113, label %88 + +88: ; preds = %86 + %89 = sub nsw i64 %18, %78 + %90 = icmp eq i64 %78, 0 + %91 = shl i64 %89, 32 + %92 = ashr exact i64 %91, 32 + %93 = select i1 %90, i64 1, i64 %92 + br label %94 + +94: ; preds = %108, %88 + %95 = phi i64 [ 0, %88 ], [ %110, %108 ] + %96 = phi i64 [ %78, %88 ], [ %100, %108 ] + %97 = sub nsw i64 %96, %93 + %98 = icmp slt i64 %97, 0 + %99 = select i1 %98, i64 %18, i64 0 + %100 = add nsw i64 %99, %97 + %101 = getelementptr inbounds [257 x i64], [257 x i64]* @htab, i64 0, i64 %100 + %102 = load i64, i64* %101, align 8, !tbaa !13 + %103 = icmp eq i64 %102, %75 + br i1 %103, label %104, label %108 + +104: ; preds = %94 + %105 = getelementptr inbounds [257 x i16], [257 x i16]* @codetab, i64 0, i64 %100 + %106 = load i16, i16* %105, align 2, !tbaa !18 + %107 = zext i16 %106 to i64 + br label %174, !llvm.loop !20 + +108: ; preds = %94 + %109 = icmp slt i64 %102, 1 + %110 = add nuw i64 %95, 1 + %111 = icmp slt i64 %69, %110 + %112 = select i1 %109, i1 true, i1 %111 + br i1 %112, label %113, label %94 + +113: ; preds = %108, %86 + %114 = phi i64 [ %78, %86 ], [ %100, %108 ] + %115 = load i64, i64* @out_count, align 8, !tbaa !13 + %116 = add nsw i64 %115, 1 + store i64 %116, i64* @out_count, align 8, !tbaa !13 + %117 = load i64, i64* @free_ent, align 8, !tbaa !13 + %118 = load i64, i64* @maxmaxcode, align 8, !tbaa !13 + %119 = icmp slt i64 %117, %118 + br i1 %119, label %120, label %125 + +120: ; preds = %113 + %121 = add nsw i64 %117, 1 + store i64 %121, i64* @free_ent, align 8, !tbaa !13 + %122 = trunc i64 %117 to i16 + %123 = getelementptr inbounds [257 x i16], [257 x i16]* @codetab, i64 0, i64 %114 + store i16 %122, i16* %123, align 2, !tbaa !18 + %124 = getelementptr 
inbounds [257 x i64], [257 x i64]* @htab, i64 0, i64 %114 + store i64 %75, i64* %124, align 8, !tbaa !13 + br label %174 + +125: ; preds = %113 + %126 = load i64, i64* @checkpoint, align 8, !tbaa !13 + %127 = icmp sge i64 %70, %126 + %128 = load i32, i32* @block_compress, align 4 + %129 = icmp ne i32 %128, 0 + %130 = select i1 %127, i1 %129, i1 false + br i1 %130, label %131, label %174 + +131: ; preds = %125 + %132 = add nsw i64 %69, 10001 + store i64 %132, i64* @checkpoint, align 8, !tbaa !13 + %133 = icmp sgt i64 %69, 8388606 + br i1 %133, label %134, label %140 + +134: ; preds = %131 + %135 = load i64, i64* @bytes_out, align 8, !tbaa !13 + %136 = icmp ult i64 %135, 256 + br i1 %136, label %144, label %137 + +137: ; preds = %134 + %138 = ashr i64 %135, 8 + %139 = sdiv i64 %70, %138 + br label %144 + +140: ; preds = %131 + %141 = shl i64 %70, 8 + %142 = load i64, i64* @bytes_out, align 8, !tbaa !13 + %143 = sdiv i64 %141, %142 + br label %144 + +144: ; preds = %140, %137, %134 + %145 = phi i64 [ %139, %137 ], [ %143, %140 ], [ 2147483647, %134 ] + %146 = load i64, i64* @ratio, align 8, !tbaa !13 + %147 = icmp sgt i64 %145, %146 + br i1 %147, label %148, label %149 + +148: ; preds = %144 + store i64 %145, i64* @ratio, align 8, !tbaa !13 + br label %174 + +149: ; preds = %144 + store i64 0, i64* @ratio, align 8, !tbaa !13 + %150 = load i64, i64* @hsize, align 8, !tbaa !13 + %151 = add i64 %150, -16 + %152 = add i64 %150, 15 + %153 = call i64 @llvm.smin.i64(i64 %150, i64 31) #6 + %154 = sub i64 %152, %153 + %155 = and i64 %154, -16 + %156 = sub i64 %151, %155 + %157 = getelementptr [257 x i64], [257 x i64]* @htab, i64 0, i64 %156 + %158 = bitcast i64* %157 to i8* + %159 = shl nuw i64 %154, 3 + %160 = add i64 %159, 128 + %161 = and i64 %160, -128 + call void @llvm.memset.p0i8.i64(i8* align 8 %158, i8 -1, i64 %161, i1 false) #6 + %162 = sub i64 %150, %155 + %163 = icmp sgt i64 %162, 16 + br i1 %163, label %164, label %173 + +164: ; preds = %149 + %165 = call i64 @llvm.smin.i64(i64 %156, i64 1) #6 + %166 = add i64 %165, -1 + %167 = getelementptr [257 x i64], [257 x i64]* @htab, i64 0, i64 %166 + %168 = bitcast i64* %167 to i8* + %169 = add i64 %150, -15 + %170 = add i64 %155, %165 + %171 = sub i64 %169, %170 + %172 = shl nuw i64 %171, 3 + call void @llvm.memset.p0i8.i64(i8* align 8 %168, i8 -1, i64 %172, i1 false) #6 + br label %173 + +173: ; preds = %164, %149 + store i64 257, i64* @free_ent, align 8, !tbaa !13 + store i32 1, i32* @clear_flg, align 4, !tbaa !11 + call void @output(i64 256) #6 + br label %174 + +174: ; preds = %173, %148, %120, %125, %104, %82 + %175 = phi i64 [ %85, %82 ], [ %107, %104 ], [ %71, %125 ], [ %71, %120 ], [ %71, %148 ], [ %71, %173 ] + %176 = load i32, i32* @InCnt, align 4, !tbaa !11 + %177 = icmp sgt i32 %176, 0 + br i1 %177, label %55, label %178, !llvm.loop !20 + +178: ; preds = %174, %52 + %179 = load i64, i64* @bytes_out, align 8, !tbaa !13 + %180 = load i64, i64* @in_count, align 8, !tbaa !13 + %181 = icmp sgt i64 %179, %180 + br i1 %181, label %182, label %183 + +182: ; preds = %178 + store i32 2, i32* @exit_stat, align 4, !tbaa !11 + br label %183 + +183: ; preds = %182, %178 + ret void +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind sspstrong uwtable willreturn +define dso_local i32 @getbyte() local_unnamed_addr #2 { + %1 = load i32, i32* @InCnt, align 4, !tbaa !11 + %2 = icmp sgt i32 %1, 0 + br i1 %2, label %3, label %13 + +3: ; preds = %0 + %4 = load i32, i32* @apsim_InCnt, align 4, !tbaa !11 + %5 = add nsw i32 %4, -1 + store i32 
%5, i32* @apsim_InCnt, align 4, !tbaa !11 + %6 = icmp sgt i32 %4, 0 + br i1 %6, label %7, label %13 + +7: ; preds = %3 + %8 = add nsw i32 %1, -1 + store i32 %8, i32* @InCnt, align 4, !tbaa !11 + %9 = load i8*, i8** @InBuff, align 8, !tbaa !15 + %10 = getelementptr inbounds i8, i8* %9, i64 1 + store i8* %10, i8** @InBuff, align 8, !tbaa !15 + %11 = load i8, i8* %9, align 1, !tbaa !5 + %12 = zext i8 %11 to i32 + br label %13 + +13: ; preds = %0, %3, %7 + %14 = phi i32 [ %12, %7 ], [ -1, %3 ], [ -1, %0 ] + ret i32 %14 +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable writeonly +define dso_local void @cl_hash(i64 %0) local_unnamed_addr #1 { + %2 = add i64 %0, -16 + %3 = add i64 %0, 15 + %4 = call i64 @llvm.smin.i64(i64 %0, i64 31) + %5 = sub i64 %3, %4 + %6 = and i64 %5, -16 + %7 = sub i64 %2, %6 + %8 = getelementptr [257 x i64], [257 x i64]* @htab, i64 0, i64 %7 + %9 = bitcast i64* %8 to i8* + %10 = shl nuw i64 %5, 3 + %11 = add i64 %10, 128 + %12 = and i64 %11, -128 + call void @llvm.memset.p0i8.i64(i8* align 8 %9, i8 -1, i64 %12, i1 false) + %13 = add i64 %0, 15 + %14 = call i64 @llvm.smin.i64(i64 %0, i64 31) + %15 = sub i64 %13, %14 + %16 = and i64 %15, -16 + %17 = sub i64 %0, %16 + %18 = icmp sgt i64 %17, 16 + br i1 %18, label %19, label %30 + +19: ; preds = %1 + %20 = add i64 %0, -16 + %21 = sub i64 %20, %16 + %22 = call i64 @llvm.smin.i64(i64 %21, i64 1) + %23 = add i64 %22, -1 + %24 = getelementptr [257 x i64], [257 x i64]* @htab, i64 0, i64 %23 + %25 = bitcast i64* %24 to i8* + %26 = add i64 %0, -15 + %27 = add i64 %22, %16 + %28 = sub i64 %26, %27 + %29 = shl nuw i64 %28, 3 + call void @llvm.memset.p0i8.i64(i8* align 8 %25, i8 -1, i64 %29, i1 false) + br label %30 + +30: ; preds = %19, %1 + ret void +} + +; Function Attrs: nofree nosync nounwind sspstrong uwtable +define dso_local void @cl_block() local_unnamed_addr #0 { + %1 = load i64, i64* @in_count, align 8, !tbaa !13 + %2 = add nsw i64 %1, 10000 + store i64 %2, i64* @checkpoint, align 8, !tbaa !13 + %3 = icmp sgt i64 %1, 8388607 + br i1 %3, label %4, label %10 + +4: ; preds = %0 + %5 = load i64, i64* @bytes_out, align 8, !tbaa !13 + %6 = icmp ult i64 %5, 256 + br i1 %6, label %14, label %7 + +7: ; preds = %4 + %8 = ashr i64 %5, 8 + %9 = sdiv i64 %1, %8 + br label %14 + +10: ; preds = %0 + %11 = shl i64 %1, 8 + %12 = load i64, i64* @bytes_out, align 8, !tbaa !13 + %13 = sdiv i64 %11, %12 + br label %14 + +14: ; preds = %4, %7, %10 + %15 = phi i64 [ %9, %7 ], [ %13, %10 ], [ 2147483647, %4 ] + %16 = load i64, i64* @ratio, align 8, !tbaa !13 + %17 = icmp sgt i64 %15, %16 + br i1 %17, label %18, label %19 + +18: ; preds = %14 + store i64 %15, i64* @ratio, align 8, !tbaa !13 + br label %44 + +19: ; preds = %14 + store i64 0, i64* @ratio, align 8, !tbaa !13 + %20 = load i64, i64* @hsize, align 8, !tbaa !13 + %21 = add i64 %20, -16 + %22 = add i64 %20, 15 + %23 = call i64 @llvm.smin.i64(i64 %20, i64 31) #6 + %24 = sub i64 %22, %23 + %25 = and i64 %24, -16 + %26 = sub i64 %21, %25 + %27 = getelementptr [257 x i64], [257 x i64]* @htab, i64 0, i64 %26 + %28 = bitcast i64* %27 to i8* + %29 = shl nuw i64 %24, 3 + %30 = add i64 %29, 128 + %31 = and i64 %30, -128 + call void @llvm.memset.p0i8.i64(i8* align 8 %28, i8 -1, i64 %31, i1 false) #6 + %32 = sub i64 %20, %25 + %33 = icmp sgt i64 %32, 16 + br i1 %33, label %34, label %43 + +34: ; preds = %19 + %35 = call i64 @llvm.smin.i64(i64 %26, i64 1) #6 + %36 = add i64 %35, -1 + %37 = getelementptr [257 x i64], [257 x i64]* @htab, i64 0, i64 %36 + %38 = bitcast i64* %37 to 
i8* + %39 = add i64 %20, -15 + %40 = add i64 %25, %35 + %41 = sub i64 %39, %40 + %42 = shl nuw i64 %41, 3 + call void @llvm.memset.p0i8.i64(i8* align 8 %38, i8 -1, i64 %42, i1 false) #6 + br label %43 + +43: ; preds = %19, %34 + store i64 257, i64* @free_ent, align 8, !tbaa !13 + store i32 1, i32* @clear_flg, align 4, !tbaa !11 + call void @output(i64 256) + br label %44 + +44: ; preds = %43, %18 + ret void +} + +; Function Attrs: nofree nosync nounwind sspstrong uwtable +define dso_local void @output(i64 %0) local_unnamed_addr #0 { + %2 = load i32, i32* @offset, align 4, !tbaa !11 + %3 = icmp sgt i64 %0, -1 + br i1 %3, label %4, label %105 + +4: ; preds = %1 + %5 = load i32, i32* @n_bits, align 4, !tbaa !11 + %6 = ashr i32 %2, 3 + %7 = sext i32 %6 to i64 + %8 = getelementptr inbounds [16 x i8], [16 x i8]* @buf, i64 0, i64 %7 + %9 = and i32 %2, 7 + %10 = load i8, i8* %8, align 1, !tbaa !5 + %11 = zext i32 %9 to i64 + %12 = getelementptr inbounds [9 x i8], [9 x i8]* @rmask, i64 0, i64 %11 + %13 = load i8, i8* %12, align 1, !tbaa !5 + %14 = and i8 %13, %10 + %15 = shl i64 %0, %11 + %16 = getelementptr inbounds [9 x i8], [9 x i8]* @lmask, i64 0, i64 %11 + %17 = load i8, i8* %16, align 1, !tbaa !5 + %18 = trunc i64 %15 to i8 + %19 = or i8 %14, %18 + %20 = and i8 %19, %17 + store i8 %20, i8* %8, align 1, !tbaa !5 + %21 = getelementptr inbounds i8, i8* %8, i64 1 + %22 = sub nuw nsw i32 8, %9 + %23 = sub nsw i32 %5, %22 + %24 = zext i32 %22 to i64 + %25 = ashr i64 %0, %24 + %26 = icmp sgt i32 %23, 7 + br i1 %26, label %27, label %32 + +27: ; preds = %4 + %28 = trunc i64 %25 to i8 + %29 = getelementptr inbounds i8, i8* %8, i64 2 + store i8 %28, i8* %21, align 1, !tbaa !5 + %30 = ashr i64 %25, 8 + %31 = add nsw i32 %23, -8 + br label %32 + +32: ; preds = %27, %4 + %33 = phi i64 [ %30, %27 ], [ %25, %4 ] + %34 = phi i32 [ %31, %27 ], [ %23, %4 ] + %35 = phi i8* [ %29, %27 ], [ %21, %4 ] + %36 = icmp eq i32 %34, 0 + br i1 %36, label %39, label %37 + +37: ; preds = %32 + %38 = trunc i64 %33 to i8 + store i8 %38, i8* %35, align 1, !tbaa !5 + br label %39 + +39: ; preds = %37, %32 + %40 = add nsw i32 %5, %2 + store i32 %40, i32* @offset, align 4, !tbaa !11 + %41 = shl i32 %5, 3 + %42 = icmp eq i32 %40, %41 + br i1 %42, label %43, label %59 + +43: ; preds = %39 + %44 = sext i32 %5 to i64 + %45 = load i64, i64* @bytes_out, align 8, !tbaa !13 + %46 = add nsw i64 %45, %44 + store i64 %46, i64* @bytes_out, align 8, !tbaa !13 + %47 = add i32 %5, -1 + %48 = call i32 @llvm.umin.i32(i32 %47, i32 15) + %49 = zext i32 %48 to i64 + %50 = getelementptr [16 x i8], [16 x i8]* @buf, i64 0, i64 %49 + br label %51 + +51: ; preds = %51, %43 + %52 = phi i8* [ getelementptr inbounds ([16 x i8], [16 x i8]* @buf, i64 0, i64 0), %43 ], [ %53, %51 ] + %53 = getelementptr inbounds i8, i8* %52, i64 1 + %54 = load i8, i8* %52, align 1, !tbaa !5 + %55 = load i8*, i8** @OutBuff, align 8, !tbaa !15 + %56 = getelementptr inbounds i8, i8* %55, i64 1 + store i8* %56, i8** @OutBuff, align 8, !tbaa !15 + store i8 %54, i8* %55, align 1, !tbaa !5 + %57 = icmp eq i8* %52, %50 + br i1 %57, label %58, label %51, !llvm.loop !21 + +58: ; preds = %51 + store i32 0, i32* @offset, align 4, !tbaa !11 + br label %59 + +59: ; preds = %58, %39 + %60 = load i64, i64* @free_ent, align 8, !tbaa !13 + %61 = load i64, i64* @maxcode, align 8, !tbaa !13 + %62 = icmp sgt i64 %60, %61 + %63 = load i32, i32* @clear_flg, align 4 + %64 = icmp sgt i32 %63, 0 + %65 = select i1 %62, i1 true, i1 %64 + br i1 %65, label %66, label %128 + +66: ; preds = %59 + %67 = load 
i32, i32* @offset, align 4, !tbaa !11 + %68 = icmp sgt i32 %67, 0 + br i1 %68, label %69, label %90 + +69: ; preds = %66 + %70 = load i32, i32* @n_bits, align 4, !tbaa !11 + %71 = icmp sgt i32 %70, 0 + br i1 %71, label %72, label %85 + +72: ; preds = %69 + %73 = add i32 %70, -1 + %74 = call i32 @llvm.umin.i32(i32 %73, i32 15) #6 + %75 = add nuw nsw i32 %74, 1 + %76 = zext i32 %75 to i64 + br label %77 + +77: ; preds = %77, %72 + %78 = phi i64 [ 0, %72 ], [ %83, %77 ] + %79 = getelementptr inbounds [16 x i8], [16 x i8]* @buf, i64 0, i64 %78 + %80 = load i8, i8* %79, align 1, !tbaa !5 + %81 = load i8*, i8** @OutBuff, align 8, !tbaa !15 + %82 = getelementptr inbounds i8, i8* %81, i64 1 + store i8* %82, i8** @OutBuff, align 8, !tbaa !15 + store i8 %80, i8* %81, align 1, !tbaa !5 + %83 = add nuw nsw i64 %78, 1 + %84 = icmp eq i64 %83, %76 + br i1 %84, label %85, label %77, !llvm.loop !22 + +85: ; preds = %77, %69 + %86 = load i32, i32* @n_bits, align 4, !tbaa !11 + %87 = sext i32 %86 to i64 + %88 = load i64, i64* @bytes_out, align 8, !tbaa !13 + %89 = add nsw i64 %88, %87 + store i64 %89, i64* @bytes_out, align 8, !tbaa !13 + br label %90 + +90: ; preds = %85, %66 + store i32 0, i32* @offset, align 4, !tbaa !11 + %91 = load i32, i32* @clear_flg, align 4, !tbaa !11 + %92 = icmp eq i32 %91, 0 + br i1 %92, label %94, label %93 + +93: ; preds = %90 + store i32 9, i32* @n_bits, align 4, !tbaa !11 + store i64 511, i64* @maxcode, align 8, !tbaa !13 + store i32 0, i32* @clear_flg, align 4, !tbaa !11 + br label %128 + +94: ; preds = %90 + %95 = load i32, i32* @n_bits, align 4, !tbaa !11 + %96 = add nsw i32 %95, 1 + store i32 %96, i32* @n_bits, align 4, !tbaa !11 + %97 = load i32, i32* @maxbits, align 4, !tbaa !11 + %98 = icmp eq i32 %96, %97 + br i1 %98, label %99, label %101 + +99: ; preds = %94 + %100 = load i64, i64* @maxmaxcode, align 8, !tbaa !13 + store i64 %100, i64* @maxcode, align 8, !tbaa !13 + br label %128 + +101: ; preds = %94 + %102 = shl nsw i32 -1, %96 + %103 = xor i32 %102, -1 + %104 = sext i32 %103 to i64 + store i64 %104, i64* @maxcode, align 8, !tbaa !13 + br label %128 + +105: ; preds = %1 + %106 = icmp sgt i32 %2, 0 + br i1 %106, label %107, label %122 + +107: ; preds = %105 + %108 = add nsw i32 %2, 7 + %109 = sdiv i32 %108, 8 + %110 = add nsw i32 %109, -1 + %111 = call i32 @llvm.umin.i32(i32 %110, i32 15) #6 + %112 = add nuw nsw i32 %111, 1 + %113 = zext i32 %112 to i64 + br label %114 + +114: ; preds = %114, %107 + %115 = phi i64 [ 0, %107 ], [ %120, %114 ] + %116 = getelementptr inbounds [16 x i8], [16 x i8]* @buf, i64 0, i64 %115 + %117 = load i8, i8* %116, align 1, !tbaa !5 + %118 = load i8*, i8** @OutBuff, align 8, !tbaa !15 + %119 = getelementptr inbounds i8, i8* %118, i64 1 + store i8* %119, i8** @OutBuff, align 8, !tbaa !15 + store i8 %117, i8* %118, align 1, !tbaa !5 + %120 = add nuw nsw i64 %115, 1 + %121 = icmp eq i64 %120, %113 + br i1 %121, label %122, label %114, !llvm.loop !22 + +122: ; preds = %114, %105 + %123 = add nsw i32 %2, 7 + %124 = sdiv i32 %123, 8 + %125 = sext i32 %124 to i64 + %126 = load i64, i64* @bytes_out, align 8, !tbaa !13 + %127 = add nsw i64 %126, %125 + store i64 %127, i64* @bytes_out, align 8, !tbaa !13 + store i32 0, i32* @offset, align 4, !tbaa !11 + br label %128 + +128: ; preds = %59, %99, %101, %93, %122 + ret void +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind sspstrong uwtable willreturn +define dso_local void @putbyte(i8 signext %0) local_unnamed_addr #2 { + %2 = load i8*, i8** @OutBuff, align 8, !tbaa !15 + %3 = 
getelementptr inbounds i8, i8* %2, i64 1 + store i8* %3, i8** @OutBuff, align 8, !tbaa !15 + store i8 %0, i8* %2, align 1, !tbaa !5 + ret void +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local void @writebytes(i8* nocapture readonly %0, i32 %1) local_unnamed_addr #3 { + %3 = icmp sgt i32 %1, 0 + br i1 %3, label %4, label %17 + +4: ; preds = %2 + %5 = add i32 %1, -1 + %6 = call i32 @llvm.umin.i32(i32 %5, i32 15) + %7 = add nuw nsw i32 %6, 1 + %8 = zext i32 %7 to i64 + br label %9 + +9: ; preds = %4, %9 + %10 = phi i64 [ 0, %4 ], [ %15, %9 ] + %11 = getelementptr inbounds i8, i8* %0, i64 %10 + %12 = load i8, i8* %11, align 1, !tbaa !5 + %13 = load i8*, i8** @OutBuff, align 8, !tbaa !15 + %14 = getelementptr inbounds i8, i8* %13, i64 1 + store i8* %14, i8** @OutBuff, align 8, !tbaa !15 + store i8 %12, i8* %13, align 1, !tbaa !5 + %15 = add nuw nsw i64 %10, 1 + %16 = icmp eq i64 %15, %8 + br i1 %16, label %17, label %9, !llvm.loop !22 + +17: ; preds = %9, %2 + ret void +} + +; Function Attrs: nofree nosync nounwind readnone speculatable willreturn +declare i64 @llvm.smin.i64(i64, i64) #4 + +; Function Attrs: argmemonly nofree nounwind willreturn writeonly +declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg) #5 + +; Function Attrs: nofree nosync nounwind readnone speculatable willreturn +declare i32 @llvm.umin.i32(i32, i32) #4 + +attributes #0 = { nofree nosync nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #1 = { nofree norecurse nosync nounwind sspstrong uwtable writeonly "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #2 = { mustprogress nofree norecurse nosync nounwind sspstrong uwtable willreturn "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #3 = { nofree norecurse nosync nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #4 = { nofree nosync nounwind readnone speculatable willreturn } +attributes #5 = { argmemonly nofree nounwind willreturn writeonly } +attributes #6 = { nounwind } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = !{!6, !6, i64 0} +!6 = !{!"omnipotent char", !7, i64 0} +!7 = !{!"Simple C/C++ TBAA"} +!8 = distinct !{!8, !9, !10} +!9 = !{!"llvm.loop.mustprogress"} +!10 = !{!"llvm.loop.unroll.disable"} +!11 = !{!12, !12, i64 0} +!12 = !{!"int", !6, i64 0} +!13 = !{!14, !14, i64 0} +!14 = !{!"long", !6, i64 0} +!15 = !{!16, !16, i64 0} +!16 = !{!"any pointer", !6, i64 0} +!17 = distinct !{!17, !9, !10} +!18 = !{!19, !19, i64 0} +!19 = !{!"short", !6, i64 0} +!20 = distinct !{!20, !9, !10} +!21 = distinct !{!21, !9, !10} +!22 = distinct !{!22, !9, !10} diff --git a/test/cover.ll 
b/test/cover.ll new file mode 100644 index 0000000..bae6789 --- /dev/null +++ b/test/cover.ll @@ -0,0 +1,119 @@ +; ModuleID = 'cover.c' +source_filename = "cover.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +; Function Attrs: nofree norecurse nosync nounwind readnone sspstrong uwtable +define dso_local i32 @swi120(i32 %0) local_unnamed_addr #0 { + br label %2 + +2: ; preds = %1, %2 + %3 = phi i32 [ 0, %1 ], [ %8, %2 ] + %4 = phi i32 [ %0, %1 ], [ %7, %2 ] + %5 = icmp ult i32 %3, 120 + %6 = select i1 %5, i32 1, i32 -1 + %7 = add nsw i32 %4, %6 + %8 = add nuw nsw i32 %3, 1 + %9 = icmp eq i32 %8, 120 + br i1 %9, label %10, label %2, !llvm.loop !5 + +10: ; preds = %2 + ret i32 %7 +} + +; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn +declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) #1 + +; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn +declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture) #1 + +; Function Attrs: nofree norecurse nosync nounwind readnone sspstrong uwtable +define dso_local i32 @swi50(i32 %0) local_unnamed_addr #0 { + br label %2 + +2: ; preds = %1, %2 + %3 = phi i32 [ 0, %1 ], [ %8, %2 ] + %4 = phi i32 [ %0, %1 ], [ %7, %2 ] + %5 = icmp ult i32 %3, 60 + %6 = select i1 %5, i32 1, i32 -1 + %7 = add nsw i32 %4, %6 + %8 = add nuw nsw i32 %3, 1 + %9 = icmp eq i32 %8, 50 + br i1 %9, label %10, label %2, !llvm.loop !8 + +10: ; preds = %2 + ret i32 %7 +} + +; Function Attrs: nofree norecurse nosync nounwind readnone sspstrong uwtable +define dso_local i32 @swi10(i32 %0) local_unnamed_addr #0 { + br label %2 + +2: ; preds = %1, %2 + %3 = phi i32 [ 0, %1 ], [ %8, %2 ] + %4 = phi i32 [ %0, %1 ], [ %7, %2 ] + %5 = icmp ult i32 %3, 10 + %6 = select i1 %5, i32 1, i32 -1 + %7 = add nsw i32 %4, %6 + %8 = add nuw nsw i32 %3, 1 + %9 = icmp eq i32 %8, 10 + br i1 %9, label %10, label %2, !llvm.loop !9 + +10: ; preds = %2 + ret i32 %7 +} + +; Function Attrs: nofree nounwind sspstrong uwtable +define dso_local i32 @main() local_unnamed_addr #2 { + %1 = alloca i32, align 4 + %2 = bitcast i32* %1 to i8* + call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %2) + store volatile i32 0, i32* %1, align 4, !tbaa !10 + %3 = load volatile i32, i32* %1, align 4, !tbaa !10 + br label %4 + +4: ; preds = %4, %0 + %5 = phi i32 [ 0, %0 ], [ %10, %4 ] + %6 = phi i32 [ %3, %0 ], [ %9, %4 ] + %7 = icmp ult i32 %5, 10 + %8 = select i1 %7, i32 1, i32 -1 + %9 = add nsw i32 %6, %8 + %10 = add nuw nsw i32 %5, 1 + %11 = icmp eq i32 %10, 10 + br i1 %11, label %12, label %4, !llvm.loop !9 + +12: ; preds = %4 + store volatile i32 %9, i32* %1, align 4, !tbaa !10 + %13 = load volatile i32, i32* %1, align 4, !tbaa !10 + %14 = call i32 @swi50(i32 %13) + store volatile i32 %14, i32* %1, align 4, !tbaa !10 + %15 = load volatile i32, i32* %1, align 4, !tbaa !10 + %16 = call i32 @swi120(i32 %15) + store volatile i32 %16, i32* %1, align 4, !tbaa !10 + %17 = load volatile i32, i32* %1, align 4, !tbaa !10 + %18 = bitcast i32* %1 to i8* + call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %18) + ret i32 %17 +} + +attributes #0 = { nofree norecurse nosync nounwind readnone sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #1 = { argmemonly mustprogress nofree nosync nounwind 
willreturn } +attributes #2 = { nofree nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = distinct !{!5, !6, !7} +!6 = !{!"llvm.loop.mustprogress"} +!7 = !{!"llvm.loop.unroll.disable"} +!8 = distinct !{!8, !6, !7} +!9 = distinct !{!9, !6, !7} +!10 = !{!11, !11, i64 0} +!11 = !{!"int", !12, i64 0} +!12 = !{!"omnipotent char", !13, i64 0} +!13 = !{!"Simple C/C++ TBAA"} diff --git a/test/crc.ll b/test/crc.ll new file mode 100644 index 0000000..89ab0a0 --- /dev/null +++ b/test/crc.ll @@ -0,0 +1,313 @@ +; ModuleID = 'crc.c' +source_filename = "crc.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +@lin = dso_local local_unnamed_addr global [256 x i8] c"asdffeagewaHAFEFaeDsFEawFdsFaefaeerdjgp\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00", align 16 +@icrc.icrctb = internal unnamed_addr global [256 x i16] zeroinitializer, align 16 +@icrc.init = internal unnamed_addr global i1 false, align 2 +@icrc.rchr = internal unnamed_addr global [256 x i8] zeroinitializer, align 16 +@icrc.it = internal unnamed_addr constant [16 x i8] c"\00\08\04\0C\02\0A\06\0E\01\09\05\0D\03\0B\07\0F", align 16 + +; Function Attrs: nofree norecurse nosync nounwind readnone sspstrong uwtable +define dso_local zeroext i16 @icrc1(i16 zeroext %0, i8 zeroext %1) local_unnamed_addr #0 { + %3 = zext i8 %1 to i16 + %4 = shl nuw i16 %3, 8 + %5 = xor i16 %4, %0 + br label %6 + +6: ; preds = %2, %6 + %7 = phi i16 [ %5, %2 ], [ %12, %6 ] + %8 = phi i32 [ 0, %2 ], [ %13, %6 ] + %9 = icmp sgt i16 %7, -1 + %10 = shl i16 %7, 1 + %11 = xor i16 %10, 4129 + %12 = select i1 %9, i16 %10, i16 %11 + %13 = add nuw nsw i32 %8, 1 + %14 = icmp eq i32 %13, 8 + br i1 %14, label %15, label %6, !llvm.loop !5 + +15: ; preds = %6 + ret i16 %12 +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local zeroext i16 @icrc(i16 zeroext %0, i64 %1, i16 signext %2, i32 %3) local_unnamed_addr #1 { + %5 = load i1, i1* @icrc.init, align 2 + br i1 %5, label %37, label %6 + +6: ; preds = %4 + store i1 true, i1* @icrc.init, align 2 + br label %7 + +7: ; preds = %6, %21 + %8 = phi i64 [ 0, %6 ], [ %34, %21 ] + %9 = phi i32 [ 0, %6 ], [ %35, %21 ] + %10 = trunc i64 %8 to i16 + %11 = shl i16 %10, 8 + br label %12 + +12: ; preds = %12, %7 + %13 = phi i16 [ %11, %7 ], [ %18, %12 ] + %14 = phi i32 [ 0, %7 ], [ %19, %12 ] + %15 = icmp sgt i16 %13, -1 + %16 = shl i16 %13, 1 + %17 = xor i16 %16, 4129 + %18 = select i1 %15, i16 %16, i16 %17 + 
%19 = add nuw nsw i32 %14, 1 + %20 = icmp eq i32 %19, 8 + br i1 %20, label %21, label %12, !llvm.loop !5 + +21: ; preds = %12 + %22 = getelementptr inbounds [256 x i16], [256 x i16]* @icrc.icrctb, i64 0, i64 %8 + store i16 %18, i16* %22, align 2, !tbaa !8 + %23 = and i32 %9, 15 + %24 = zext i32 %23 to i64 + %25 = getelementptr inbounds [16 x i8], [16 x i8]* @icrc.it, i64 0, i64 %24 + %26 = load i8, i8* %25, align 1, !tbaa !12 + %27 = shl i8 %26, 4 + %28 = lshr i32 %9, 4 + %29 = zext i32 %28 to i64 + %30 = getelementptr inbounds [16 x i8], [16 x i8]* @icrc.it, i64 0, i64 %29 + %31 = load i8, i8* %30, align 1, !tbaa !12 + %32 = or i8 %27, %31 + %33 = getelementptr inbounds [256 x i8], [256 x i8]* @icrc.rchr, i64 0, i64 %8 + store i8 %32, i8* %33, align 1, !tbaa !12 + %34 = add nuw nsw i64 %8, 1 + %35 = trunc i64 %34 to i32 + %36 = icmp eq i64 %34, 256 + br i1 %36, label %37, label %7, !llvm.loop !13 + +37: ; preds = %21, %4 + %38 = icmp sgt i16 %2, -1 + br i1 %38, label %39, label %43 + +39: ; preds = %37 + %40 = and i16 %2, 255 + %41 = shl i16 %2, 8 + %42 = or i16 %41, %40 + br label %58 + +43: ; preds = %37 + %44 = icmp slt i32 %3, 0 + br i1 %44, label %45, label %58 + +45: ; preds = %43 + %46 = lshr i16 %0, 8 + %47 = zext i16 %46 to i64 + %48 = getelementptr inbounds [256 x i8], [256 x i8]* @icrc.rchr, i64 0, i64 %47 + %49 = load i8, i8* %48, align 1, !tbaa !12 + %50 = zext i8 %49 to i16 + %51 = and i16 %0, 255 + %52 = zext i16 %51 to i64 + %53 = getelementptr inbounds [256 x i8], [256 x i8]* @icrc.rchr, i64 0, i64 %52 + %54 = load i8, i8* %53, align 1, !tbaa !12 + %55 = zext i8 %54 to i16 + %56 = shl nuw i16 %55, 8 + %57 = or i16 %56, %50 + br label %58 + +58: ; preds = %43, %45, %39 + %59 = phi i16 [ %42, %39 ], [ %57, %45 ], [ %0, %43 ] + %60 = icmp slt i32 %3, 0 + %61 = icmp eq i64 %1, 0 + br i1 %61, label %85, label %62 + +62: ; preds = %58, %72 + %63 = phi i64 [ %83, %72 ], [ 1, %58 ] + %64 = phi i16 [ %81, %72 ], [ %59, %58 ] + %65 = phi i16 [ %82, %72 ], [ 1, %58 ] + %66 = getelementptr inbounds [256 x i8], [256 x i8]* @lin, i64 0, i64 %63 + %67 = load i8, i8* %66, align 1, !tbaa !12 + br i1 %60, label %68, label %72 + +68: ; preds = %62 + %69 = zext i8 %67 to i64 + %70 = getelementptr inbounds [256 x i8], [256 x i8]* @icrc.rchr, i64 0, i64 %69 + %71 = load i8, i8* %70, align 1, !tbaa !12 + br label %72 + +72: ; preds = %62, %68 + %73 = phi i8 [ %71, %68 ], [ %67, %62 ] + %74 = zext i8 %73 to i16 + %75 = lshr i16 %64, 8 + %76 = xor i16 %75, %74 + %77 = zext i16 %76 to i64 + %78 = getelementptr inbounds [256 x i16], [256 x i16]* @icrc.icrctb, i64 0, i64 %77 + %79 = load i16, i16* %78, align 2, !tbaa !8 + %80 = shl i16 %64, 8 + %81 = xor i16 %79, %80 + %82 = add i16 %65, 1 + %83 = zext i16 %82 to i64 + %84 = icmp ugt i64 %83, %1 + br i1 %84, label %85, label %62, !llvm.loop !14 + +85: ; preds = %72, %58 + %86 = phi i16 [ %59, %58 ], [ %81, %72 ] + %87 = icmp sgt i32 %3, -1 + br i1 %87, label %101, label %88 + +88: ; preds = %85 + %89 = lshr i16 %86, 8 + %90 = zext i16 %89 to i64 + %91 = getelementptr inbounds [256 x i8], [256 x i8]* @icrc.rchr, i64 0, i64 %90 + %92 = load i8, i8* %91, align 1, !tbaa !12 + %93 = zext i8 %92 to i16 + %94 = and i16 %86, 255 + %95 = zext i16 %94 to i64 + %96 = getelementptr inbounds [256 x i8], [256 x i8]* @icrc.rchr, i64 0, i64 %95 + %97 = load i8, i8* %96, align 1, !tbaa !12 + %98 = zext i8 %97 to i16 + %99 = shl nuw i16 %98, 8 + %100 = or i16 %99, %93 + br label %101 + +101: ; preds = %85, %88 + %102 = phi i16 [ %100, %88 ], [ %86, %85 ] + ret i16 
%102 +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local i32 @main() local_unnamed_addr #1 { + store i8 0, i8* getelementptr inbounds ([256 x i8], [256 x i8]* @lin, i64 0, i64 41), align 1, !tbaa !12 + %1 = load i1, i1* @icrc.init, align 2 + br i1 %1, label %33, label %2 + +2: ; preds = %0 + store i1 true, i1* @icrc.init, align 2 + br label %3 + +3: ; preds = %17, %2 + %4 = phi i64 [ 0, %2 ], [ %30, %17 ] + %5 = phi i32 [ 0, %2 ], [ %31, %17 ] + %6 = trunc i64 %4 to i16 + %7 = shl i16 %6, 8 + br label %8 + +8: ; preds = %8, %3 + %9 = phi i16 [ %7, %3 ], [ %14, %8 ] + %10 = phi i32 [ 0, %3 ], [ %15, %8 ] + %11 = icmp sgt i16 %9, -1 + %12 = shl i16 %9, 1 + %13 = xor i16 %12, 4129 + %14 = select i1 %11, i16 %12, i16 %13 + %15 = add nuw nsw i32 %10, 1 + %16 = icmp eq i32 %15, 8 + br i1 %16, label %17, label %8, !llvm.loop !5 + +17: ; preds = %8 + %18 = getelementptr inbounds [256 x i16], [256 x i16]* @icrc.icrctb, i64 0, i64 %4 + store i16 %14, i16* %18, align 2, !tbaa !8 + %19 = and i32 %5, 15 + %20 = zext i32 %19 to i64 + %21 = getelementptr inbounds [16 x i8], [16 x i8]* @icrc.it, i64 0, i64 %20 + %22 = load i8, i8* %21, align 1, !tbaa !12 + %23 = shl i8 %22, 4 + %24 = lshr i32 %5, 4 + %25 = zext i32 %24 to i64 + %26 = getelementptr inbounds [16 x i8], [16 x i8]* @icrc.it, i64 0, i64 %25 + %27 = load i8, i8* %26, align 1, !tbaa !12 + %28 = or i8 %23, %27 + %29 = getelementptr inbounds [256 x i8], [256 x i8]* @icrc.rchr, i64 0, i64 %4 + store i8 %28, i8* %29, align 1, !tbaa !12 + %30 = add nuw nsw i64 %4, 1 + %31 = trunc i64 %30 to i32 + %32 = icmp eq i64 %30, 256 + br i1 %32, label %33, label %3, !llvm.loop !13 + +33: ; preds = %17, %0 + br label %34 + +34: ; preds = %33, %34 + %35 = phi i64 [ %47, %34 ], [ 1, %33 ] + %36 = phi i16 [ %46, %34 ], [ 0, %33 ] + %37 = getelementptr inbounds [256 x i8], [256 x i8]* @lin, i64 0, i64 %35 + %38 = load i8, i8* %37, align 1, !tbaa !12 + %39 = zext i8 %38 to i16 + %40 = lshr i16 %36, 8 + %41 = xor i16 %40, %39 + %42 = zext i16 %41 to i64 + %43 = getelementptr inbounds [256 x i16], [256 x i16]* @icrc.icrctb, i64 0, i64 %42 + %44 = load i16, i16* %43, align 2, !tbaa !8 + %45 = shl i16 %36, 8 + %46 = xor i16 %44, %45 + %47 = add nuw nsw i64 %35, 1 + %48 = icmp eq i64 %47, 41 + br i1 %48, label %49, label %34, !llvm.loop !14 + +49: ; preds = %34 + %50 = lshr i16 %46, 8 + %51 = trunc i16 %50 to i8 + store i8 %51, i8* getelementptr inbounds ([256 x i8], [256 x i8]* @lin, i64 0, i64 41), align 1, !tbaa !12 + %52 = trunc i16 %44 to i8 + store i8 %52, i8* getelementptr inbounds ([256 x i8], [256 x i8]* @lin, i64 0, i64 42), align 2, !tbaa !12 + %53 = load i1, i1* @icrc.init, align 2 + br i1 %53, label %85, label %54 + +54: ; preds = %49 + store i1 true, i1* @icrc.init, align 2 + br label %55 + +55: ; preds = %69, %54 + %56 = phi i64 [ 0, %54 ], [ %82, %69 ] + %57 = phi i32 [ 0, %54 ], [ %83, %69 ] + %58 = trunc i64 %56 to i16 + %59 = shl i16 %58, 8 + br label %60 + +60: ; preds = %60, %55 + %61 = phi i16 [ %59, %55 ], [ %66, %60 ] + %62 = phi i32 [ 0, %55 ], [ %67, %60 ] + %63 = icmp sgt i16 %61, -1 + %64 = shl i16 %61, 1 + %65 = xor i16 %64, 4129 + %66 = select i1 %63, i16 %64, i16 %65 + %67 = add nuw nsw i32 %62, 1 + %68 = icmp eq i32 %67, 8 + br i1 %68, label %69, label %60, !llvm.loop !5 + +69: ; preds = %60 + %70 = getelementptr inbounds [256 x i16], [256 x i16]* @icrc.icrctb, i64 0, i64 %56 + store i16 %66, i16* %70, align 2, !tbaa !8 + %71 = and i32 %57, 15 + %72 = zext i32 %71 to i64 + %73 = getelementptr 
inbounds [16 x i8], [16 x i8]* @icrc.it, i64 0, i64 %72 + %74 = load i8, i8* %73, align 1, !tbaa !12 + %75 = shl i8 %74, 4 + %76 = lshr i32 %57, 4 + %77 = zext i32 %76 to i64 + %78 = getelementptr inbounds [16 x i8], [16 x i8]* @icrc.it, i64 0, i64 %77 + %79 = load i8, i8* %78, align 1, !tbaa !12 + %80 = or i8 %75, %79 + %81 = getelementptr inbounds [256 x i8], [256 x i8]* @icrc.rchr, i64 0, i64 %56 + store i8 %80, i8* %81, align 1, !tbaa !12 + %82 = add nuw nsw i64 %56, 1 + %83 = trunc i64 %82 to i32 + %84 = icmp eq i64 %82, 256 + br i1 %84, label %85, label %55, !llvm.loop !13 + +85: ; preds = %69, %49 + ret i32 0 +} + +attributes #0 = { nofree norecurse nosync nounwind readnone sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #1 = { nofree norecurse nosync nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = distinct !{!5, !6, !7} +!6 = !{!"llvm.loop.mustprogress"} +!7 = !{!"llvm.loop.unroll.disable"} +!8 = !{!9, !9, i64 0} +!9 = !{!"short", !10, i64 0} +!10 = !{!"omnipotent char", !11, i64 0} +!11 = !{!"Simple C/C++ TBAA"} +!12 = !{!10, !10, i64 0} +!13 = distinct !{!13, !6, !7} +!14 = distinct !{!14, !6, !7} diff --git a/test/dijkstra.ll b/test/dijkstra.ll new file mode 100644 index 0000000..ff30132 --- /dev/null +++ b/test/dijkstra.ll @@ -0,0 +1,456 @@ +; ModuleID = 'dijkstra.c' +source_filename = "dijkstra.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +%struct._QITEM = type { i64, i64, i64, %struct._QITEM* } +%struct._NODE = type { i64, i64 } + +@sanity = dso_local local_unnamed_addr global i32 512, align 4 +@alloc_pool = dso_local global [16384 x i8] zeroinitializer, align 16 +@alloc_ptr = dso_local local_unnamed_addr global i8* getelementptr inbounds ([16384 x i8], [16384 x i8]* @alloc_pool, i64 0, i64 0), align 8 +@dijkstra_input_data = dso_local local_unnamed_addr global [100 x [100 x i64]] [[100 x i64] [i64 32, i64 32, i64 54, i64 12, i64 52, i64 56, i64 8, i64 30, i64 44, i64 94, i64 44, i64 39, i64 65, i64 19, i64 51, i64 91, i64 1, i64 5, i64 89, i64 34, i64 25, i64 58, i64 20, i64 51, i64 38, i64 65, i64 30, i64 7, i64 20, i64 10, i64 51, i64 18, i64 43, i64 71, i64 97, i64 61, i64 26, i64 5, i64 57, i64 70, i64 65, i64 0, i64 75, i64 29, i64 86, i64 93, i64 87, i64 87, i64 64, i64 75, i64 88, i64 89, i64 100, i64 7, i64 40, i64 37, i64 38, i64 36, i64 44, i64 24, i64 46, i64 95, i64 43, i64 89, i64 32, i64 5, i64 15, i64 58, i64 77, i64 72, i64 95, i64 8, i64 38, i64 69, i64 37, i64 24, i64 27, i64 90, i64 77, i64 92, i64 31, i64 30, i64 80, i64 30, i64 37, i64 86, i64 33, i64 76, i64 21, i64 77, i64 100, i64 68, i64 37, i64 8, i64 22, i64 69, i64 81, i64 38, i64 94, i64 57], [100 x i64] [i64 76, i64 54, i64 65, i64 14, i64 89, i64 69, i64 4, i64 16, i64 24, i64 47, i64 7, i64 21, i64 78, i64 53, i64 17, i64 81, i64 39, i64 50, i64 22, i64 60, i64 93, i64 89, i64 94, i64 30, 
i64 97, i64 16, i64 65, i64 43, i64 20, i64 24, i64 67, i64 62, i64 78, i64 98, i64 42, i64 67, i64 32, i64 46, i64 49, i64 57, i64 60, i64 56, i64 44, i64 37, i64 75, i64 62, i64 17, i64 13, i64 11, i64 40, i64 40, i64 4, i64 95, i64 100, i64 0, i64 57, i64 82, i64 31, i64 0, i64 1, i64 56, i64 67, i64 30, i64 100, i64 64, i64 72, i64 66, i64 63, i64 18, i64 81, i64 19, i64 44, i64 2, i64 63, i64 81, i64 78, i64 91, i64 64, i64 91, i64 2, i64 70, i64 97, i64 73, i64 64, i64 97, i64 39, i64 21, i64 78, i64 70, i64 21, i64 46, i64 25, i64 54, i64 76, i64 92, i64 84, i64 47, i64 57, i64 46, i64 31], [100 x i64] [i64 38, i64 31, i64 75, i64 40, i64 61, i64 21, i64 84, i64 51, i64 86, i64 41, i64 19, i64 21, i64 37, i64 58, i64 86, i64 100, i64 97, i64 73, i64 44, i64 67, i64 60, i64 90, i64 58, i64 13, i64 31, i64 49, i64 63, i64 44, i64 73, i64 76, i64 76, i64 77, i64 73, i64 16, i64 83, i64 100, i64 4, i64 67, i64 51, i64 56, i64 7, i64 36, i64 77, i64 10, i64 95, i64 28, i64 10, i64 57, i64 0, i64 54, i64 23, i64 60, i64 9, i64 48, i64 39, i64 40, i64 97, i64 69, i64 84, i64 35, i64 44, i64 25, i64 11, i64 83, i64 8, i64 61, i64 83, i64 12, i64 27, i64 100, i64 34, i64 0, i64 35, i64 10, i64 10, i64 96, i64 39, i64 87, i64 53, i64 5, i64 40, i64 42, i64 66, i64 15, i64 90, i64 71, i64 55, i64 87, i64 39, i64 5, i64 88, i64 49, i64 97, i64 100, i64 32, i64 4, i64 60, i64 81, i64 83, i64 53], [100 x i64] [i64 80, i64 16, i64 53, i64 14, i64 94, i64 29, i64 77, i64 99, i64 16, i64 29, i64 3, i64 22, i64 71, i64 35, i64 4, i64 61, i64 6, i64 25, i64 13, i64 11, i64 30, i64 0, i64 27, i64 94, i64 66, i64 25, i64 64, i64 92, i64 5, i64 47, i64 44, i64 85, i64 29, i64 63, i64 65, i64 89, i64 59, i64 41, i64 87, i64 41, i64 36, i64 57, i64 29, i64 7, i64 92, i64 33, i64 34, i64 64, i64 59, i64 47, i64 76, i64 55, i64 13, i64 2, i64 48, i64 46, i64 27, i64 12, i64 37, i64 99, i64 25, i64 48, i64 83, i64 20, i64 77, i64 13, i64 9, i64 35, i64 55, i64 62, i64 76, i64 57, i64 18, i64 72, i64 64, i64 10, i64 4, i64 64, i64 74, i64 63, i64 77, i64 15, i64 18, i64 91, i64 84, i64 32, i64 36, i64 77, i64 10, i64 39, i64 75, i64 35, i64 87, i64 23, i64 22, i64 30, i64 37, i64 31, i64 65, i64 58], [100 x i64] [i64 59, i64 7, i64 14, i64 78, i64 79, i64 45, i64 54, i64 83, i64 8, i64 94, i64 12, i64 86, i64 9, i64 97, i64 42, i64 93, i64 95, i64 44, i64 70, i64 5, i64 83, i64 10, i64 40, i64 36, i64 34, i64 62, i64 66, i64 71, i64 59, i64 97, i64 95, i64 18, i64 3, i64 8, i64 62, i64 48, i64 19, i64 15, i64 98, i64 28, i64 8, i64 9, i64 80, i64 84, i64 72, i64 21, i64 43, i64 66, i64 65, i64 79, i64 71, i64 13, i64 89, i64 78, i64 49, i64 22, i64 5, i64 14, i64 59, i64 65, i64 11, i64 53, i64 49, i64 81, i64 28, i64 77, i64 29, i64 47, i64 92, i64 26, i64 41, i64 66, i64 1, i64 20, i64 50, i64 73, i64 7, i64 59, i64 4, i64 72, i64 37, i64 76, i64 86, i64 25, i64 19, i64 0, i64 14, i64 24, i64 15, i64 73, i64 55, i64 93, i64 93, i64 3, i64 73, i64 87, i64 80, i64 68, i64 100, i64 37], [100 x i64] [i64 94, i64 41, i64 3, i64 61, i64 27, i64 19, i64 33, i64 35, i64 78, i64 38, i64 73, i64 14, i64 80, i64 58, i64 5, i64 99, i64 59, i64 19, i64 22, i64 40, i64 59, i64 78, i64 32, i64 17, i64 47, i64 71, i64 3, i64 94, i64 39, i64 2, i64 97, i64 99, i64 9, i64 66, i64 60, i64 37, i64 85, i64 59, i64 38, i64 28, i64 63, i64 10, i64 8, i64 8, i64 35, i64 81, i64 6, i64 60, i64 100, i64 96, i64 66, i64 24, i64 39, i64 64, i64 41, i64 52, i64 34, i64 10, i64 11, i64 39, i64 80, i64 8, i64 4, i64 89, i64 74, i64 64, 
i64 92, i64 25, i64 89, i64 29, i64 19, i64 18, i64 6, i64 28, i64 26, i64 7, i64 8, i64 33, i64 67, i64 74, i64 95, i64 32, i64 99, i64 33, i64 96, i64 5, i64 51, i64 96, i64 83, i64 63, i64 35, i64 62, i64 71, i64 39, i64 16, i64 10, i64 69, i64 8, i64 35, i64 23], [100 x i64] [i64 3, i64 55, i64 41, i64 76, i64 49, i64 68, i64 83, i64 23, i64 67, i64 15, i64 97, i64 61, i64 13, i64 61, i64 60, i64 75, i64 33, i64 77, i64 71, i64 15, i64 39, i64 72, i64 43, i64 76, i64 77, i64 59, i64 53, i64 11, i64 33, i64 88, i64 34, i64 37, i64 8, i64 76, i64 79, i64 23, i64 9, i64 62, i64 46, i64 76, i64 43, i64 9, i64 2, i64 57, i64 70, i64 28, i64 31, i64 69, i64 4, i64 68, i64 84, i64 10, i64 39, i64 26, i64 52, i64 82, i64 52, i64 4, i64 93, i64 85, i64 59, i64 94, i64 21, i64 33, i64 35, i64 67, i64 57, i64 44, i64 28, i64 69, i64 86, i64 37, i64 78, i64 54, i64 94, i64 14, i64 48, i64 25, i64 83, i64 18, i64 59, i64 33, i64 28, i64 99, i64 25, i64 81, i64 46, i64 77, i64 51, i64 39, i64 62, i64 9, i64 32, i64 49, i64 43, i64 33, i64 15, i64 100, i64 77, i64 9], [100 x i64] [i64 68, i64 28, i64 47, i64 12, i64 82, i64 6, i64 26, i64 96, i64 98, i64 75, i64 13, i64 57, i64 7, i64 8, i64 55, i64 33, i64 55, i64 0, i64 76, i64 5, i64 5, i64 3, i64 15, i64 3, i64 53, i64 58, i64 36, i64 34, i64 23, i64 79, i64 10, i64 57, i64 6, i64 23, i64 69, i64 54, i64 29, i64 61, i64 49, i64 27, i64 36, i64 63, i64 84, i64 9, i64 71, i64 4, i64 8, i64 25, i64 71, i64 85, i64 97, i64 77, i64 88, i64 11, i64 46, i64 6, i64 35, i64 83, i64 7, i64 24, i64 27, i64 17, i64 82, i64 34, i64 40, i64 16, i64 88, i64 69, i64 44, i64 3, i64 62, i64 46, i64 32, i64 45, i64 55, i64 2, i64 49, i64 64, i64 94, i64 87, i64 14, i64 90, i64 63, i64 68, i64 68, i64 75, i64 75, i64 2, i64 23, i64 82, i64 27, i64 51, i64 65, i64 75, i64 85, i64 71, i64 57, i64 38, i64 39, i64 0], [100 x i64] [i64 7, i64 1, i64 46, i64 39, i64 12, i64 68, i64 41, i64 28, i64 31, i64 0, i64 14, i64 45, i64 91, i64 43, i64 12, i64 58, i64 17, i64 53, i64 26, i64 41, i64 0, i64 19, i64 92, i64 31, i64 60, i64 42, i64 1, i64 17, i64 46, i64 41, i64 84, i64 54, i64 8, i64 97, i64 93, i64 20, i64 64, i64 0, i64 14, i64 61, i64 0, i64 28, i64 72, i64 57, i64 71, i64 50, i64 81, i64 89, i64 70, i64 7, i64 96, i64 70, i64 26, i64 87, i64 1, i64 87, i64 95, i64 69, i64 70, i64 40, i64 9, i64 19, i64 94, i64 84, i64 15, i64 87, i64 71, i64 45, i64 87, i64 85, i64 5, i64 53, i64 13, i64 43, i64 10, i64 50, i64 94, i64 91, i64 38, i64 63, i64 98, i64 33, i64 99, i64 91, i64 86, i64 66, i64 43, i64 80, i64 35, i64 79, i64 20, i64 10, i64 98, i64 80, i64 61, i64 13, i64 66, i64 31, i64 24, i64 18], [100 x i64] [i64 82, i64 97, i64 72, i64 61, i64 39, i64 48, i64 11, i64 99, i64 38, i64 49, i64 27, i64 2, i64 49, i64 26, i64 59, i64 0, i64 58, i64 1, i64 81, i64 59, i64 80, i64 67, i64 70, i64 77, i64 46, i64 97, i64 56, i64 79, i64 27, i64 81, i64 63, i64 75, i64 77, i64 0, i64 36, i64 82, i64 48, i64 47, i64 81, i64 53, i64 62, i64 7, i64 55, i64 77, i64 100, i64 13, i64 78, i64 24, i64 81, i64 24, i64 83, i64 26, i64 91, i64 18, i64 2, i64 2, i64 14, i64 25, i64 47, i64 7, i64 72, i64 10, i64 83, i64 14, i64 10, i64 18, i64 96, i64 25, i64 65, i64 42, i64 78, i64 93, i64 16, i64 32, i64 70, i64 15, i64 11, i64 47, i64 5, i64 58, i64 71, i64 89, i64 84, i64 27, i64 73, i64 86, i64 96, i64 88, i64 77, i64 43, i64 95, i64 48, i64 19, i64 43, i64 62, i64 96, i64 61, i64 24, i64 20, i64 92], [100 x i64] [i64 66, i64 98, i64 85, i64 82, i64 96, i64 20, i64 64, i64 73, 
i64 67, i64 69, i64 30, i64 3, i64 23, i64 13, i64 97, i64 97, i64 66, i64 58, i64 50, i64 42, i64 0, i64 44, i64 57, i64 86, i64 54, i64 85, i64 82, i64 14, i64 8, i64 1, i64 73, i64 41, i64 66, i64 23, i64 22, i64 61, i64 43, i64 86, i64 0, i64 9, i64 21, i64 30, i64 79, i64 44, i64 44, i64 75, i64 40, i64 76, i64 99, i64 56, i64 17, i64 100, i64 67, i64 40, i64 51, i64 20, i64 25, i64 32, i64 0, i64 100, i64 0, i64 73, i64 40, i64 66, i64 96, i64 29, i64 93, i64 38, i64 81, i64 93, i64 13, i64 1, i64 90, i64 92, i64 46, i64 100, i64 32, i64 52, i64 75, i64 31, i64 8, i64 58, i64 97, i64 75, i64 99, i64 13, i64 61, i64 90, i64 46, i64 61, i64 89, i64 12, i64 34, i64 96, i64 78, i64 96, i64 24, i64 36, i64 34, i64 4], [100 x i64] [i64 96, i64 13, i64 73, i64 85, i64 72, i64 18, i64 50, i64 70, i64 36, i64 24, i64 67, i64 10, i64 82, i64 29, i64 51, i64 80, i64 43, i64 11, i64 35, i64 89, i64 39, i64 24, i64 0, i64 73, i64 86, i64 44, i64 34, i64 9, i64 46, i64 34, i64 80, i64 41, i64 48, i64 52, i64 92, i64 19, i64 36, i64 41, i64 55, i64 39, i64 31, i64 22, i64 49, i64 13, i64 51, i64 67, i64 59, i64 94, i64 44, i64 95, i64 48, i64 83, i64 85, i64 48, i64 21, i64 70, i64 58, i64 56, i64 45, i64 4, i64 90, i64 91, i64 11, i64 3, i64 43, i64 70, i64 89, i64 45, i64 77, i64 44, i64 84, i64 8, i64 66, i64 100, i64 88, i64 83, i64 66, i64 46, i64 77, i64 76, i64 6, i64 24, i64 59, i64 91, i64 39, i64 46, i64 26, i64 97, i64 68, i64 37, i64 0, i64 58, i64 28, i64 79, i64 27, i64 37, i64 48, i64 16, i64 82, i64 24], [100 x i64] [i64 60, i64 66, i64 32, i64 92, i64 65, i64 19, i64 74, i64 97, i64 32, i64 16, i64 72, i64 38, i64 41, i64 97, i64 96, i64 46, i64 43, i64 88, i64 42, i64 77, i64 25, i64 9, i64 34, i64 19, i64 88, i64 28, i64 56, i64 1, i64 44, i64 3, i64 25, i64 70, i64 69, i64 24, i64 27, i64 100, i64 9, i64 0, i64 96, i64 7, i64 84, i64 34, i64 12, i64 91, i64 30, i64 7, i64 36, i64 39, i64 95, i64 78, i64 16, i64 86, i64 53, i64 16, i64 71, i64 6, i64 44, i64 26, i64 7, i64 54, i64 30, i64 100, i64 23, i64 65, i64 23, i64 50, i64 65, i64 99, i64 17, i64 26, i64 73, i64 67, i64 60, i64 85, i64 57, i64 57, i64 92, i64 93, i64 96, i64 52, i64 36, i64 78, i64 4, i64 90, i64 61, i64 75, i64 96, i64 4, i64 68, i64 3, i64 25, i64 64, i64 69, i64 14, i64 28, i64 58, i64 31, i64 59, i64 56, i64 48], [100 x i64] [i64 86, i64 28, i64 81, i64 45, i64 12, i64 37, i64 1, i64 70, i64 29, i64 64, i64 89, i64 31, i64 41, i64 93, i64 20, i64 1, i64 67, i64 83, i64 73, i64 0, i64 52, i64 98, i64 64, i64 20, i64 78, i64 93, i64 78, i64 8, i64 17, i64 100, i64 22, i64 2, i64 95, i64 2, i64 48, i64 6, i64 39, i64 15, i64 43, i64 34, i64 79, i64 31, i64 66, i64 87, i64 23, i64 52, i64 54, i64 56, i64 34, i64 93, i64 57, i64 52, i64 56, i64 87, i64 72, i64 34, i64 79, i64 15, i64 42, i64 63, i64 15, i64 65, i64 65, i64 9, i64 67, i64 79, i64 82, i64 73, i64 95, i64 91, i64 6, i64 39, i64 21, i64 38, i64 92, i64 10, i64 91, i64 46, i64 67, i64 91, i64 38, i64 90, i64 43, i64 95, i64 76, i64 81, i64 28, i64 21, i64 63, i64 70, i64 84, i64 78, i64 0, i64 48, i64 53, i64 68, i64 94, i64 0, i64 40, i64 88], [100 x i64] [i64 92, i64 12, i64 93, i64 12, i64 17, i64 85, i64 23, i64 7, i64 30, i64 56, i64 64, i64 34, i64 45, i64 73, i64 28, i64 87, i64 20, i64 22, i64 7, i64 83, i64 59, i64 91, i64 26, i64 59, i64 5, i64 79, i64 26, i64 99, i64 79, i64 32, i64 52, i64 70, i64 11, i64 44, i64 83, i64 28, i64 95, i64 72, i64 1, i64 91, i64 27, i64 65, i64 25, i64 38, i64 4, i64 19, i64 24, i64 24, i64 8, i64 99, 
i64 73, i64 67, i64 89, i64 99, i64 25, i64 60, i64 77, i64 18, i64 24, i64 21, i64 16, i64 42, i64 58, i64 27, i64 53, i64 6, i64 55, i64 47, i64 78, i64 56, i64 38, i64 71, i64 88, i64 29, i64 8, i64 58, i64 48, i64 99, i64 48, i64 56, i64 97, i64 20, i64 89, i64 52, i64 18, i64 14, i64 78, i64 61, i64 99, i64 2, i64 48, i64 14, i64 44, i64 5, i64 42, i64 97, i64 11, i64 63, i64 10, i64 55], [100 x i64] [i64 19, i64 48, i64 25, i64 73, i64 77, i64 100, i64 30, i64 91, i64 99, i64 78, i64 13, i64 95, i64 98, i64 1, i64 12, i64 82, i64 82, i64 91, i64 8, i64 80, i64 93, i64 22, i64 61, i64 2, i64 28, i64 2, i64 66, i64 5, i64 65, i64 76, i64 61, i64 50, i64 90, i64 86, i64 22, i64 32, i64 52, i64 52, i64 22, i64 50, i64 96, i64 1, i64 10, i64 59, i64 70, i64 90, i64 40, i64 51, i64 80, i64 14, i64 98, i64 38, i64 37, i64 58, i64 40, i64 31, i64 60, i64 72, i64 2, i64 91, i64 47, i64 63, i64 7, i64 2, i64 15, i64 29, i64 34, i64 67, i64 48, i64 23, i64 83, i64 9, i64 24, i64 59, i64 69, i64 94, i64 48, i64 8, i64 11, i64 27, i64 90, i64 8, i64 31, i64 93, i64 32, i64 38, i64 90, i64 58, i64 9, i64 92, i64 48, i64 23, i64 55, i64 55, i64 25, i64 36, i64 51, i64 60, i64 69, i64 65], [100 x i64] [i64 83, i64 51, i64 74, i64 73, i64 76, i64 42, i64 67, i64 24, i64 17, i64 44, i64 17, i64 73, i64 18, i64 49, i64 65, i64 50, i64 87, i64 54, i64 7, i64 62, i64 11, i64 21, i64 85, i64 32, i64 77, i64 10, i64 68, i64 94, i64 70, i64 36, i64 24, i64 52, i64 53, i64 98, i64 24, i64 96, i64 6, i64 57, i64 86, i64 90, i64 67, i64 2, i64 62, i64 85, i64 17, i64 26, i64 34, i64 70, i64 46, i64 41, i64 32, i64 23, i64 63, i64 16, i64 56, i64 5, i64 26, i64 23, i64 65, i64 62, i64 26, i64 89, i64 80, i64 45, i64 52, i64 71, i64 6, i64 58, i64 27, i64 92, i64 47, i64 61, i64 61, i64 75, i64 45, i64 78, i64 67, i64 46, i64 14, i64 12, i64 53, i64 46, i64 36, i64 82, i64 28, i64 58, i64 87, i64 21, i64 47, i64 17, i64 83, i64 73, i64 72, i64 63, i64 85, i64 24, i64 33, i64 91, i64 48, i64 26], [100 x i64] [i64 49, i64 62, i64 53, i64 9, i64 36, i64 99, i64 53, i64 3, i64 10, i64 67, i64 82, i64 63, i64 79, i64 84, i64 45, i64 7, i64 41, i64 98, i64 95, i64 89, i64 82, i64 43, i64 27, i64 53, i64 5, i64 78, i64 77, i64 4, i64 69, i64 25, i64 98, i64 17, i64 53, i64 16, i64 93, i64 89, i64 81, i64 45, i64 58, i64 91, i64 12, i64 40, i64 54, i64 91, i64 90, i64 65, i64 64, i64 31, i64 62, i64 58, i64 86, i64 43, i64 1, i64 12, i64 63, i64 73, i64 91, i64 39, i64 44, i64 25, i64 30, i64 7, i64 8, i64 83, i64 23, i64 0, i64 38, i64 4, i64 45, i64 96, i64 61, i64 23, i64 1, i64 14, i64 81, i64 92, i64 45, i64 44, i64 89, i64 74, i64 69, i64 74, i64 83, i64 36, i64 52, i64 45, i64 75, i64 8, i64 85, i64 18, i64 100, i64 81, i64 92, i64 7, i64 30, i64 82, i64 74, i64 34, i64 52, i64 86], [100 x i64] [i64 96, i64 12, i64 8, i64 98, i64 94, i64 89, i64 55, i64 38, i64 100, i64 43, i64 11, i64 68, i64 83, i64 95, i64 3, i64 0, i64 39, i64 78, i64 9, i64 90, i64 63, i64 8, i64 37, i64 20, i64 83, i64 67, i64 1, i64 56, i64 67, i64 53, i64 7, i64 62, i64 66, i64 16, i64 25, i64 25, i64 71, i64 80, i64 63, i64 70, i64 89, i64 75, i64 3, i64 37, i64 35, i64 6, i64 38, i64 74, i64 51, i64 47, i64 30, i64 80, i64 21, i64 67, i64 100, i64 3, i64 100, i64 68, i64 26, i64 66, i64 87, i64 33, i64 27, i64 52, i64 15, i64 53, i64 43, i64 53, i64 99, i64 6, i64 22, i64 88, i64 47, i64 26, i64 24, i64 82, i64 99, i64 28, i64 21, i64 15, i64 75, i64 51, i64 95, i64 63, i64 84, i64 61, i64 66, i64 83, i64 28, i64 58, i64 14, i64 14, i64 
58, i64 42, i64 33, i64 39, i64 61, i64 76, i64 92, i64 25], [100 x i64] [i64 48, i64 14, i64 79, i64 95, i64 6, i64 70, i64 76, i64 4, i64 98, i64 98, i64 87, i64 39, i64 14, i64 81, i64 1, i64 99, i64 7, i64 33, i64 81, i64 1, i64 92, i64 96, i64 16, i64 15, i64 3, i64 15, i64 54, i64 30, i64 57, i64 12, i64 55, i64 5, i64 93, i64 0, i64 100, i64 99, i64 70, i64 42, i64 69, i64 67, i64 39, i64 21, i64 5, i64 53, i64 2, i64 6, i64 51, i64 76, i64 40, i64 99, i64 78, i64 98, i64 60, i64 60, i64 79, i64 63, i64 75, i64 99, i64 59, i64 98, i64 10, i64 80, i64 2, i64 2, i64 80, i64 69, i64 67, i64 49, i64 10, i64 2, i64 16, i64 49, i64 23, i64 88, i64 68, i64 92, i64 95, i64 86, i64 68, i64 0, i64 84, i64 11, i64 64, i64 43, i64 71, i64 42, i64 72, i64 45, i64 40, i64 97, i64 42, i64 17, i64 76, i64 11, i64 86, i64 56, i64 80, i64 19, i64 4, i64 90], [100 x i64] [i64 88, i64 87, i64 4, i64 77, i64 75, i64 72, i64 69, i64 35, i64 23, i64 2, i64 35, i64 6, i64 80, i64 99, i64 15, i64 50, i64 6, i64 53, i64 61, i64 46, i64 49, i64 69, i64 29, i64 25, i64 80, i64 15, i64 47, i64 25, i64 34, i64 51, i64 14, i64 21, i64 38, i64 85, i64 98, i64 79, i64 57, i64 32, i64 13, i64 46, i64 0, i64 48, i64 53, i64 80, i64 12, i64 34, i64 29, i64 18, i64 54, i64 56, i64 30, i64 2, i64 25, i64 60, i64 94, i64 4, i64 41, i64 40, i64 30, i64 75, i64 58, i64 10, i64 62, i64 62, i64 96, i64 59, i64 40, i64 18, i64 58, i64 53, i64 64, i64 24, i64 67, i64 83, i64 4, i64 79, i64 17, i64 100, i64 63, i64 37, i64 56, i64 93, i64 39, i64 81, i64 18, i64 100, i64 51, i64 59, i64 5, i64 81, i64 100, i64 63, i64 58, i64 61, i64 24, i64 53, i64 87, i64 64, i64 37, i64 10], [100 x i64] [i64 83, i64 67, i64 34, i64 49, i64 50, i64 38, i64 27, i64 33, i64 4, i64 56, i64 70, i64 60, i64 15, i64 75, i64 6, i64 33, i64 40, i64 57, i64 59, i64 46, i64 4, i64 24, i64 75, i64 62, i64 86, i64 100, i64 81, i64 38, i64 29, i64 17, i64 48, i64 79, i64 84, i64 48, i64 27, i64 100, i64 87, i64 21, i64 32, i64 57, i64 77, i64 68, i64 16, i64 92, i64 9, i64 22, i64 92, i64 49, i64 79, i64 16, i64 95, i64 83, i64 40, i64 70, i64 10, i64 25, i64 35, i64 91, i64 29, i64 30, i64 74, i64 43, i64 8, i64 24, i64 92, i64 2, i64 23, i64 44, i64 23, i64 22, i64 0, i64 66, i64 56, i64 16, i64 58, i64 65, i64 4, i64 15, i64 14, i64 49, i64 31, i64 75, i64 32, i64 71, i64 10, i64 8, i64 63, i64 45, i64 100, i64 92, i64 42, i64 73, i64 1, i64 50, i64 97, i64 93, i64 18, i64 87, i64 36, i64 41], [100 x i64] [i64 75, i64 36, i64 7, i64 30, i64 18, i64 31, i64 96, i64 22, i64 12, i64 76, i64 71, i64 43, i64 50, i64 69, i64 80, i64 61, i64 78, i64 42, i64 72, i64 43, i64 0, i64 13, i64 15, i64 68, i64 30, i64 79, i64 60, i64 48, i64 31, i64 62, i64 56, i64 5, i64 98, i64 29, i64 1, i64 82, i64 26, i64 97, i64 3, i64 38, i64 72, i64 40, i64 81, i64 89, i64 76, i64 26, i64 15, i64 53, i64 35, i64 87, i64 96, i64 1, i64 67, i64 77, i64 69, i64 97, i64 21, i64 28, i64 10, i64 18, i64 90, i64 32, i64 23, i64 53, i64 61, i64 25, i64 34, i64 87, i64 88, i64 3, i64 91, i64 26, i64 9, i64 37, i64 81, i64 85, i64 64, i64 96, i64 3, i64 99, i64 82, i64 65, i64 100, i64 48, i64 42, i64 68, i64 10, i64 29, i64 62, i64 88, i64 48, i64 17, i64 19, i64 37, i64 70, i64 47, i64 28, i64 70, i64 100, i64 16], [100 x i64] [i64 73, i64 91, i64 8, i64 82, i64 94, i64 89, i64 33, i64 57, i64 84, i64 36, i64 21, i64 31, i64 1, i64 87, i64 46, i64 9, i64 20, i64 56, i64 4, i64 82, i64 9, i64 52, i64 99, i64 96, i64 56, i64 34, i64 8, i64 84, i64 3, i64 7, i64 66, i64 42, i64 64, i64 
74, i64 24, i64 58, i64 28, i64 23, i64 81, i64 11, i64 59, i64 2, i64 9, i64 26, i64 55, i64 55, i64 1, i64 76, i64 77, i64 6, i64 23, i64 87, i64 24, i64 89, i64 82, i64 80, i64 22, i64 90, i64 30, i64 93, i64 63, i64 96, i64 34, i64 27, i64 36, i64 24, i64 51, i64 30, i64 47, i64 98, i64 8, i64 73, i64 100, i64 17, i64 99, i64 21, i64 72, i64 0, i64 97, i64 48, i64 73, i64 86, i64 34, i64 97, i64 74, i64 82, i64 43, i64 63, i64 37, i64 73, i64 55, i64 0, i64 34, i64 55, i64 94, i64 36, i64 80, i64 10, i64 67, i64 93], [100 x i64] [i64 7, i64 75, i64 65, i64 74, i64 92, i64 64, i64 95, i64 63, i64 30, i64 57, i64 77, i64 2, i64 42, i64 11, i64 65, i64 16, i64 59, i64 7, i64 45, i64 97, i64 46, i64 66, i64 63, i64 81, i64 20, i64 56, i64 83, i64 66, i64 32, i64 49, i64 59, i64 39, i64 90, i64 23, i64 12, i64 81, i64 53, i64 73, i64 9, i64 49, i64 29, i64 87, i64 17, i64 72, i64 64, i64 83, i64 54, i64 89, i64 90, i64 65, i64 85, i64 36, i64 30, i64 13, i64 83, i64 16, i64 35, i64 65, i64 83, i64 67, i64 14, i64 7, i64 73, i64 70, i64 97, i64 85, i64 51, i64 16, i64 24, i64 26, i64 65, i64 53, i64 79, i64 83, i64 91, i64 8, i64 65, i64 10, i64 98, i64 20, i64 41, i64 48, i64 22, i64 71, i64 62, i64 4, i64 54, i64 63, i64 36, i64 36, i64 30, i64 16, i64 9, i64 2, i64 86, i64 5, i64 53, i64 36, i64 88, i64 77], [100 x i64] [i64 29, i64 53, i64 97, i64 74, i64 1, i64 53, i64 83, i64 32, i64 30, i64 46, i64 52, i64 71, i64 94, i64 41, i64 42, i64 21, i64 45, i64 62, i64 85, i64 81, i64 98, i64 81, i64 97, i64 73, i64 83, i64 83, i64 44, i64 1, i64 85, i64 32, i64 45, i64 80, i64 85, i64 41, i64 54, i64 52, i64 60, i64 2, i64 84, i64 90, i64 48, i64 1, i64 61, i64 7, i64 42, i64 69, i64 96, i64 54, i64 30, i64 46, i64 0, i64 94, i64 26, i64 64, i64 32, i64 75, i64 46, i64 76, i64 42, i64 97, i64 7, i64 87, i64 43, i64 58, i64 94, i64 97, i64 9, i64 54, i64 99, i64 59, i64 43, i64 12, i64 61, i64 70, i64 19, i64 69, i64 4, i64 14, i64 22, i64 0, i64 26, i64 23, i64 60, i64 52, i64 53, i64 92, i64 93, i64 65, i64 68, i64 35, i64 61, i64 75, i64 88, i64 70, i64 33, i64 82, i64 66, i64 8, i64 35, i64 30], [100 x i64] [i64 68, i64 44, i64 8, i64 95, i64 81, i64 28, i64 63, i64 85, i64 8, i64 52, i64 86, i64 35, i64 41, i64 11, i64 53, i64 94, i64 3, i64 12, i64 58, i64 71, i64 13, i64 85, i64 11, i64 0, i64 55, i64 44, i64 82, i64 87, i64 19, i64 83, i64 84, i64 87, i64 27, i64 92, i64 81, i64 7, i64 86, i64 9, i64 58, i64 61, i64 27, i64 9, i64 62, i64 68, i64 21, i64 81, i64 61, i64 24, i64 93, i64 85, i64 61, i64 72, i64 70, i64 72, i64 73, i64 91, i64 16, i64 20, i64 77, i64 35, i64 3, i64 26, i64 88, i64 97, i64 18, i64 34, i64 3, i64 70, i64 9, i64 27, i64 30, i64 37, i64 37, i64 92, i64 4, i64 24, i64 73, i64 32, i64 48, i64 31, i64 83, i64 8, i64 3, i64 52, i64 80, i64 42, i64 8, i64 62, i64 62, i64 52, i64 63, i64 65, i64 78, i64 16, i64 27, i64 62, i64 50, i64 30, i64 32, i64 26], [100 x i64] [i64 24, i64 62, i64 63, i64 27, i64 20, i64 67, i64 51, i64 59, i64 65, i64 65, i64 90, i64 48, i64 73, i64 93, i64 66, i64 18, i64 0, i64 75, i64 47, i64 63, i64 26, i64 76, i64 94, i64 3, i64 59, i64 21, i64 66, i64 75, i64 17, i64 64, i64 0, i64 41, i64 25, i64 63, i64 68, i64 11, i64 97, i64 85, i64 70, i64 61, i64 49, i64 60, i64 8, i64 88, i64 18, i64 41, i64 6, i64 19, i64 15, i64 19, i64 48, i64 41, i64 61, i64 41, i64 10, i64 19, i64 62, i64 42, i64 95, i64 46, i64 5, i64 95, i64 53, i64 98, i64 58, i64 21, i64 8, i64 20, i64 5, i64 79, i64 81, i64 21, i64 4, i64 56, i64 8, i64 89, i64 97, 
i64 81, i64 74, i64 11, i64 100, i64 21, i64 18, i64 61, i64 29, i64 95, i64 46, i64 57, i64 37, i64 40, i64 2, i64 42, i64 1, i64 56, i64 5, i64 59, i64 43, i64 14, i64 79, i64 14], [100 x i64] [i64 59, i64 25, i64 35, i64 29, i64 81, i64 44, i64 84, i64 43, i64 24, i64 58, i64 20, i64 91, i64 45, i64 38, i64 17, i64 74, i64 100, i64 63, i64 31, i64 36, i64 3, i64 33, i64 44, i64 71, i64 55, i64 50, i64 96, i64 98, i64 30, i64 40, i64 12, i64 55, i64 65, i64 13, i64 50, i64 12, i64 57, i64 33, i64 55, i64 48, i64 91, i64 42, i64 38, i64 36, i64 46, i64 55, i64 76, i64 45, i64 17, i64 6, i64 81, i64 87, i64 6, i64 25, i64 57, i64 61, i64 41, i64 52, i64 25, i64 37, i64 92, i64 3, i64 92, i64 23, i64 16, i64 7, i64 35, i64 74, i64 40, i64 56, i64 21, i64 98, i64 98, i64 59, i64 100, i64 44, i64 80, i64 75, i64 89, i64 97, i64 82, i64 36, i64 50, i64 54, i64 27, i64 6, i64 14, i64 68, i64 25, i64 5, i64 4, i64 83, i64 8, i64 62, i64 5, i64 25, i64 69, i64 40, i64 65, i64 75], [100 x i64] [i64 63, i64 52, i64 72, i64 60, i64 10, i64 71, i64 70, i64 56, i64 12, i64 59, i64 52, i64 94, i64 95, i64 68, i64 13, i64 21, i64 41, i64 94, i64 55, i64 66, i64 100, i64 25, i64 48, i64 7, i64 53, i64 54, i64 99, i64 88, i64 60, i64 63, i64 62, i64 22, i64 14, i64 34, i64 49, i64 91, i64 71, i64 18, i64 46, i64 83, i64 77, i64 65, i64 42, i64 37, i64 32, i64 55, i64 24, i64 39, i64 15, i64 45, i64 4, i64 14, i64 36, i64 19, i64 21, i64 89, i64 39, i64 87, i64 76, i64 99, i64 49, i64 4, i64 88, i64 64, i64 4, i64 36, i64 54, i64 75, i64 20, i64 67, i64 24, i64 64, i64 31, i64 32, i64 0, i64 29, i64 54, i64 92, i64 69, i64 69, i64 36, i64 39, i64 83, i64 39, i64 58, i64 70, i64 27, i64 63, i64 56, i64 70, i64 28, i64 5, i64 74, i64 15, i64 35, i64 78, i64 17, i64 55, i64 18, i64 37], [100 x i64] [i64 88, i64 8, i64 0, i64 85, i64 41, i64 68, i64 14, i64 95, i64 59, i64 49, i64 63, i64 61, i64 54, i64 11, i64 66, i64 79, i64 81, i64 94, i64 41, i64 3, i64 29, i64 69, i64 75, i64 69, i64 50, i64 9, i64 46, i64 33, i64 30, i64 30, i64 71, i64 18, i64 39, i64 37, i64 2, i64 80, i64 4, i64 83, i64 40, i64 29, i64 98, i64 2, i64 57, i64 52, i64 13, i64 22, i64 30, i64 60, i64 82, i64 71, i64 29, i64 10, i64 6, i64 3, i64 79, i64 22, i64 79, i64 91, i64 56, i64 76, i64 21, i64 26, i64 94, i64 26, i64 63, i64 62, i64 72, i64 34, i64 45, i64 11, i64 29, i64 42, i64 13, i64 86, i64 94, i64 93, i64 75, i64 90, i64 18, i64 56, i64 27, i64 48, i64 33, i64 33, i64 17, i64 78, i64 55, i64 63, i64 69, i64 10, i64 38, i64 56, i64 2, i64 31, i64 48, i64 32, i64 93, i64 19, i64 32, i64 3], [100 x i64] [i64 30, i64 61, i64 46, i64 43, i64 13, i64 5, i64 1, i64 88, i64 96, i64 86, i64 9, i64 89, i64 100, i64 42, i64 21, i64 17, i64 20, i64 42, i64 80, i64 55, i64 19, i64 17, i64 10, i64 88, i64 14, i64 58, i64 19, i64 6, i64 77, i64 17, i64 77, i64 73, i64 79, i64 22, i64 15, i64 58, i64 94, i64 83, i64 45, i64 55, i64 68, i64 20, i64 43, i64 68, i64 63, i64 30, i64 51, i64 49, i64 39, i64 97, i64 3, i64 58, i64 13, i64 80, i64 45, i64 27, i64 3, i64 31, i64 100, i64 80, i64 48, i64 76, i64 52, i64 93, i64 64, i64 33, i64 50, i64 24, i64 82, i64 61, i64 45, i64 15, i64 82, i64 89, i64 49, i64 10, i64 85, i64 100, i64 59, i64 23, i64 96, i64 28, i64 81, i64 75, i64 7, i64 93, i64 68, i64 10, i64 90, i64 34, i64 56, i64 3, i64 76, i64 74, i64 97, i64 6, i64 73, i64 12, i64 30, i64 20], [100 x i64] [i64 40, i64 75, i64 35, i64 88, i64 29, i64 85, i64 64, i64 14, i64 50, i64 22, i64 37, i64 12, i64 16, i64 85, i64 87, i64 23, i64 
77, i64 21, i64 100, i64 66, i64 55, i64 21, i64 35, i64 30, i64 95, i64 31, i64 2, i64 33, i64 10, i64 32, i64 53, i64 16, i64 74, i64 54, i64 70, i64 69, i64 38, i64 33, i64 83, i64 55, i64 55, i64 87, i64 67, i64 71, i64 71, i64 19, i64 60, i64 13, i64 40, i64 25, i64 45, i64 61, i64 46, i64 80, i64 58, i64 6, i64 78, i64 60, i64 39, i64 88, i64 93, i64 58, i64 70, i64 32, i64 11, i64 39, i64 0, i64 16, i64 72, i64 50, i64 71, i64 93, i64 36, i64 37, i64 29, i64 6, i64 56, i64 55, i64 19, i64 63, i64 80, i64 64, i64 23, i64 25, i64 43, i64 81, i64 98, i64 87, i64 41, i64 2, i64 40, i64 100, i64 60, i64 9, i64 31, i64 37, i64 14, i64 98, i64 53, i64 86], [100 x i64] [i64 47, i64 90, i64 44, i64 83, i64 26, i64 73, i64 55, i64 49, i64 27, i64 40, i64 11, i64 73, i64 70, i64 0, i64 64, i64 13, i64 82, i64 61, i64 66, i64 89, i64 29, i64 6, i64 88, i64 89, i64 15, i64 85, i64 93, i64 30, i64 82, i64 11, i64 82, i64 96, i64 1, i64 26, i64 78, i64 27, i64 65, i64 100, i64 42, i64 93, i64 39, i64 53, i64 31, i64 9, i64 54, i64 96, i64 89, i64 1, i64 22, i64 54, i64 90, i64 52, i64 60, i64 43, i64 6, i64 42, i64 27, i64 99, i64 72, i64 75, i64 10, i64 19, i64 70, i64 11, i64 45, i64 14, i64 4, i64 10, i64 13, i64 47, i64 69, i64 52, i64 66, i64 100, i64 27, i64 86, i64 61, i64 15, i64 53, i64 84, i64 36, i64 42, i64 35, i64 96, i64 85, i64 41, i64 37, i64 78, i64 40, i64 75, i64 53, i64 16, i64 95, i64 22, i64 94, i64 5, i64 36, i64 98, i64 15, i64 15], [100 x i64] [i64 10, i64 50, i64 34, i64 77, i64 16, i64 61, i64 28, i64 77, i64 43, i64 82, i64 60, i64 79, i64 90, i64 95, i64 74, i64 41, i64 2, i64 78, i64 18, i64 8, i64 18, i64 71, i64 24, i64 12, i64 60, i64 17, i64 85, i64 62, i64 81, i64 66, i64 78, i64 92, i64 16, i64 11, i64 34, i64 32, i64 38, i64 28, i64 75, i64 81, i64 9, i64 1, i64 59, i64 66, i64 62, i64 100, i64 6, i64 64, i64 43, i64 24, i64 72, i64 61, i64 62, i64 62, i64 40, i64 21, i64 79, i64 24, i64 49, i64 26, i64 90, i64 26, i64 84, i64 72, i64 3, i64 84, i64 70, i64 8, i64 11, i64 45, i64 89, i64 88, i64 46, i64 14, i64 53, i64 74, i64 80, i64 59, i64 38, i64 89, i64 83, i64 9, i64 15, i64 10, i64 38, i64 55, i64 31, i64 83, i64 45, i64 81, i64 8, i64 1, i64 73, i64 92, i64 73, i64 43, i64 75, i64 9, i64 51, i64 53], [100 x i64] [i64 54, i64 5, i64 40, i64 66, i64 86, i64 59, i64 39, i64 31, i64 17, i64 43, i64 19, i64 66, i64 19, i64 1, i64 77, i64 57, i64 22, i64 74, i64 39, i64 68, i64 20, i64 14, i64 35, i64 60, i64 5, i64 7, i64 2, i64 47, i64 16, i64 19, i64 66, i64 36, i64 91, i64 5, i64 68, i64 43, i64 30, i64 74, i64 40, i64 47, i64 83, i64 26, i64 79, i64 1, i64 27, i64 21, i64 24, i64 49, i64 96, i64 64, i64 83, i64 82, i64 78, i64 17, i64 41, i64 49, i64 92, i64 9, i64 62, i64 74, i64 28, i64 27, i64 77, i64 86, i64 99, i64 44, i64 95, i64 28, i64 84, i64 34, i64 41, i64 33, i64 60, i64 20, i64 34, i64 87, i64 41, i64 59, i64 36, i64 2, i64 89, i64 85, i64 85, i64 32, i64 2, i64 25, i64 47, i64 94, i64 35, i64 9, i64 67, i64 29, i64 2, i64 43, i64 81, i64 1, i64 54, i64 75, i64 96, i64 3], [100 x i64] [i64 9, i64 37, i64 36, i64 35, i64 23, i64 37, i64 22, i64 30, i64 62, i64 24, i64 33, i64 50, i64 8, i64 84, i64 48, i64 77, i64 8, i64 95, i64 70, i64 9, i64 70, i64 37, i64 5, i64 73, i64 46, i64 86, i64 74, i64 100, i64 27, i64 35, i64 70, i64 2, i64 72, i64 5, i64 37, i64 95, i64 42, i64 25, i64 25, i64 3, i64 49, i64 24, i64 19, i64 24, i64 7, i64 67, i64 0, i64 82, i64 28, i64 71, i64 92, i64 98, i64 74, i64 63, i64 70, i64 86, i64 14, i64 9, i64 52, 
i64 41, i64 45, i64 21, i64 43, i64 83, i64 93, i64 47, i64 44, i64 35, i64 72, i64 35, i64 4, i64 88, i64 59, i64 91, i64 11, i64 32, i64 57, i64 11, i64 13, i64 51, i64 48, i64 71, i64 49, i64 88, i64 33, i64 85, i64 40, i64 48, i64 61, i64 92, i64 55, i64 5, i64 79, i64 65, i64 54, i64 71, i64 11, i64 98, i64 72, i64 83], [100 x i64] [i64 32, i64 43, i64 70, i64 57, i64 33, i64 47, i64 89, i64 56, i64 25, i64 69, i64 7, i64 73, i64 39, i64 56, i64 27, i64 39, i64 6, i64 67, i64 53, i64 67, i64 24, i64 74, i64 38, i64 2, i64 38, i64 93, i64 73, i64 49, i64 56, i64 11, i64 99, i64 89, i64 54, i64 34, i64 11, i64 87, i64 48, i64 67, i64 42, i64 73, i64 35, i64 49, i64 11, i64 40, i64 71, i64 4, i64 45, i64 78, i64 71, i64 98, i64 10, i64 95, i64 38, i64 49, i64 63, i64 76, i64 41, i64 36, i64 92, i64 97, i64 47, i64 56, i64 51, i64 0, i64 56, i64 63, i64 53, i64 3, i64 29, i64 95, i64 76, i64 30, i64 44, i64 54, i64 70, i64 81, i64 58, i64 82, i64 58, i64 96, i64 45, i64 69, i64 56, i64 83, i64 84, i64 19, i64 59, i64 24, i64 21, i64 16, i64 87, i64 34, i64 72, i64 4, i64 0, i64 27, i64 33, i64 53, i64 31, i64 28], [100 x i64] [i64 47, i64 73, i64 58, i64 57, i64 26, i64 94, i64 38, i64 85, i64 75, i64 62, i64 80, i64 87, i64 97, i64 35, i64 69, i64 80, i64 20, i64 27, i64 3, i64 41, i64 43, i64 57, i64 75, i64 81, i64 27, i64 75, i64 8, i64 60, i64 27, i64 5, i64 88, i64 41, i64 78, i64 11, i64 98, i64 71, i64 71, i64 1, i64 55, i64 12, i64 64, i64 0, i64 99, i64 60, i64 1, i64 67, i64 40, i64 22, i64 61, i64 9, i64 63, i64 70, i64 32, i64 4, i64 51, i64 59, i64 79, i64 25, i64 18, i64 73, i64 30, i64 72, i64 13, i64 7, i64 49, i64 77, i64 78, i64 87, i64 79, i64 99, i64 99, i64 42, i64 65, i64 63, i64 68, i64 67, i64 96, i64 7, i64 55, i64 56, i64 84, i64 84, i64 93, i64 15, i64 88, i64 43, i64 75, i64 33, i64 34, i64 59, i64 72, i64 64, i64 98, i64 85, i64 37, i64 12, i64 27, i64 82, i64 99, i64 5], [100 x i64] [i64 80, i64 63, i64 13, i64 11, i64 92, i64 48, i64 44, i64 88, i64 55, i64 99, i64 9, i64 4, i64 48, i64 1, i64 20, i64 2, i64 10, i64 61, i64 1, i64 44, i64 86, i64 73, i64 74, i64 83, i64 23, i64 11, i64 62, i64 50, i64 93, i64 26, i64 22, i64 38, i64 90, i64 1, i64 15, i64 47, i64 49, i64 59, i64 34, i64 71, i64 23, i64 44, i64 75, i64 38, i64 11, i64 61, i64 40, i64 22, i64 21, i64 41, i64 32, i64 7, i64 13, i64 6, i64 56, i64 36, i64 84, i64 17, i64 52, i64 76, i64 44, i64 74, i64 80, i64 100, i64 42, i64 96, i64 46, i64 91, i64 20, i64 81, i64 27, i64 10, i64 91, i64 2, i64 48, i64 1, i64 29, i64 88, i64 90, i64 51, i64 95, i64 22, i64 58, i64 7, i64 95, i64 13, i64 9, i64 78, i64 31, i64 61, i64 19, i64 41, i64 1, i64 65, i64 40, i64 43, i64 26, i64 86, i64 100, i64 47], [100 x i64] [i64 32, i64 94, i64 23, i64 22, i64 62, i64 71, i64 91, i64 91, i64 58, i64 80, i64 41, i64 18, i64 68, i64 65, i64 25, i64 62, i64 79, i64 0, i64 5, i64 76, i64 27, i64 24, i64 83, i64 28, i64 56, i64 22, i64 37, i64 82, i64 74, i64 3, i64 95, i64 6, i64 97, i64 17, i64 95, i64 24, i64 54, i64 85, i64 14, i64 78, i64 31, i64 56, i64 96, i64 99, i64 20, i64 87, i64 27, i64 65, i64 87, i64 32, i64 6, i64 14, i64 23, i64 89, i64 8, i64 45, i64 77, i64 12, i64 26, i64 51, i64 82, i64 88, i64 23, i64 44, i64 71, i64 17, i64 68, i64 25, i64 69, i64 82, i64 2, i64 100, i64 3, i64 99, i64 64, i64 91, i64 85, i64 91, i64 21, i64 38, i64 90, i64 28, i64 52, i64 79, i64 83, i64 26, i64 23, i64 60, i64 38, i64 49, i64 10, i64 86, i64 2, i64 33, i64 29, i64 74, i64 16, i64 97, i64 65, i64 51], [100 x 
i64] [i64 45, i64 67, i64 16, i64 48, i64 31, i64 81, i64 4, i64 16, i64 37, i64 26, i64 20, i64 93, i64 20, i64 38, i64 71, i64 2, i64 64, i64 94, i64 62, i64 69, i64 9, i64 72, i64 54, i64 11, i64 71, i64 84, i64 51, i64 54, i64 80, i64 15, i64 4, i64 24, i64 83, i64 88, i64 39, i64 80, i64 68, i64 43, i64 62, i64 71, i64 35, i64 82, i64 64, i64 55, i64 19, i64 0, i64 58, i64 84, i64 95, i64 19, i64 18, i64 3, i64 58, i64 72, i64 81, i64 95, i64 55, i64 32, i64 14, i64 1, i64 47, i64 19, i64 92, i64 96, i64 6, i64 30, i64 76, i64 40, i64 40, i64 37, i64 77, i64 75, i64 19, i64 6, i64 30, i64 38, i64 7, i64 54, i64 88, i64 68, i64 73, i64 5, i64 71, i64 97, i64 78, i64 51, i64 58, i64 99, i64 49, i64 72, i64 66, i64 97, i64 57, i64 58, i64 58, i64 63, i64 54, i64 33, i64 69, i64 60], [100 x i64] [i64 37, i64 12, i64 1, i64 56, i64 18, i64 31, i64 60, i64 92, i64 51, i64 14, i64 59, i64 90, i64 19, i64 29, i64 87, i64 63, i64 47, i64 10, i64 28, i64 96, i64 82, i64 94, i64 58, i64 39, i64 17, i64 16, i64 68, i64 38, i64 15, i64 3, i64 64, i64 52, i64 15, i64 65, i64 74, i64 100, i64 62, i64 0, i64 92, i64 12, i64 14, i64 50, i64 2, i64 33, i64 46, i64 55, i64 63, i64 59, i64 65, i64 91, i64 20, i64 46, i64 50, i64 79, i64 51, i64 34, i64 61, i64 19, i64 72, i64 76, i64 89, i64 35, i64 95, i64 3, i64 67, i64 68, i64 69, i64 28, i64 68, i64 60, i64 41, i64 82, i64 77, i64 43, i64 82, i64 22, i64 98, i64 44, i64 47, i64 28, i64 0, i64 67, i64 74, i64 50, i64 11, i64 92, i64 84, i64 72, i64 77, i64 21, i64 14, i64 65, i64 23, i64 8, i64 34, i64 90, i64 42, i64 2, i64 84, i64 10], [100 x i64] [i64 63, i64 24, i64 58, i64 5, i64 33, i64 5, i64 94, i64 97, i64 15, i64 40, i64 24, i64 15, i64 6, i64 65, i64 32, i64 18, i64 56, i64 82, i64 56, i64 32, i64 70, i64 70, i64 97, i64 93, i64 78, i64 30, i64 48, i64 87, i64 99, i64 31, i64 97, i64 27, i64 22, i64 20, i64 32, i64 55, i64 93, i64 25, i64 52, i64 7, i64 31, i64 42, i64 90, i64 4, i64 6, i64 88, i64 89, i64 62, i64 35, i64 44, i64 60, i64 4, i64 81, i64 56, i64 63, i64 24, i64 52, i64 10, i64 10, i64 17, i64 8, i64 73, i64 44, i64 30, i64 94, i64 77, i64 51, i64 86, i64 68, i64 69, i64 59, i64 66, i64 11, i64 48, i64 70, i64 84, i64 1, i64 58, i64 12, i64 37, i64 68, i64 72, i64 41, i64 48, i64 95, i64 71, i64 73, i64 12, i64 47, i64 83, i64 29, i64 55, i64 56, i64 74, i64 51, i64 15, i64 16, i64 2, i64 67, i64 50], [100 x i64] [i64 71, i64 92, i64 15, i64 82, i64 6, i64 51, i64 66, i64 7, i64 75, i64 44, i64 44, i64 43, i64 15, i64 52, i64 57, i64 9, i64 22, i64 96, i64 89, i64 35, i64 79, i64 17, i64 91, i64 0, i64 57, i64 7, i64 82, i64 73, i64 9, i64 14, i64 90, i64 81, i64 5, i64 4, i64 28, i64 11, i64 22, i64 60, i64 19, i64 97, i64 3, i64 29, i64 5, i64 86, i64 81, i64 63, i64 61, i64 69, i64 58, i64 49, i64 71, i64 2, i64 67, i64 27, i64 69, i64 90, i64 34, i64 50, i64 29, i64 44, i64 64, i64 18, i64 91, i64 36, i64 89, i64 85, i64 47, i64 10, i64 45, i64 32, i64 7, i64 14, i64 62, i64 12, i64 100, i64 8, i64 41, i64 61, i64 44, i64 100, i64 9, i64 14, i64 68, i64 42, i64 41, i64 37, i64 99, i64 75, i64 87, i64 27, i64 85, i64 17, i64 45, i64 75, i64 53, i64 33, i64 26, i64 66, i64 10, i64 71], [100 x i64] [i64 99, i64 84, i64 85, i64 60, i64 62, i64 51, i64 68, i64 3, i64 11, i64 11, i64 69, i64 87, i64 92, i64 36, i64 96, i64 32, i64 39, i64 94, i64 74, i64 93, i64 87, i64 58, i64 9, i64 31, i64 100, i64 28, i64 30, i64 25, i64 94, i64 6, i64 62, i64 92, i64 90, i64 12, i64 17, i64 52, i64 29, i64 86, i64 55, i64 40, i64 63, i64 
90, i64 94, i64 21, i64 92, i64 55, i64 53, i64 31, i64 14, i64 93, i64 23, i64 0, i64 17, i64 99, i64 98, i64 16, i64 26, i64 27, i64 7, i64 86, i64 34, i64 35, i64 78, i64 90, i64 13, i64 95, i64 41, i64 43, i64 46, i64 62, i64 49, i64 76, i64 51, i64 42, i64 97, i64 9, i64 63, i64 15, i64 40, i64 77, i64 8, i64 63, i64 43, i64 25, i64 61, i64 40, i64 7, i64 53, i64 68, i64 81, i64 38, i64 68, i64 82, i64 82, i64 57, i64 95, i64 43, i64 65, i64 37, i64 55], [100 x i64] [i64 93, i64 87, i64 30, i64 10, i64 95, i64 93, i64 19, i64 58, i64 75, i64 59, i64 0, i64 83, i64 88, i64 44, i64 74, i64 14, i64 50, i64 47, i64 67, i64 17, i64 94, i64 71, i64 51, i64 75, i64 53, i64 75, i64 69, i64 96, i64 5, i64 73, i64 16, i64 98, i64 59, i64 13, i64 7, i64 19, i64 5, i64 93, i64 43, i64 80, i64 17, i64 44, i64 28, i64 4, i64 54, i64 68, i64 18, i64 3, i64 14, i64 51, i64 88, i64 7, i64 22, i64 4, i64 48, i64 41, i64 45, i64 17, i64 2, i64 50, i64 90, i64 18, i64 14, i64 14, i64 31, i64 88, i64 33, i64 3, i64 81, i64 77, i64 49, i64 98, i64 87, i64 44, i64 2, i64 6, i64 11, i64 87, i64 76, i64 93, i64 4, i64 63, i64 66, i64 26, i64 34, i64 14, i64 33, i64 79, i64 98, i64 35, i64 29, i64 53, i64 19, i64 43, i64 67, i64 51, i64 30, i64 66, i64 20, i64 77], [100 x i64] [i64 8, i64 69, i64 75, i64 61, i64 79, i64 43, i64 33, i64 91, i64 96, i64 9, i64 49, i64 100, i64 38, i64 14, i64 25, i64 72, i64 28, i64 58, i64 51, i64 92, i64 59, i64 46, i64 44, i64 79, i64 55, i64 77, i64 96, i64 51, i64 9, i64 15, i64 28, i64 17, i64 50, i64 69, i64 45, i64 29, i64 11, i64 78, i64 86, i64 6, i64 53, i64 34, i64 73, i64 92, i64 48, i64 98, i64 29, i64 43, i64 22, i64 46, i64 34, i64 47, i64 92, i64 79, i64 25, i64 12, i64 55, i64 87, i64 64, i64 64, i64 68, i64 58, i64 48, i64 18, i64 93, i64 59, i64 13, i64 70, i64 2, i64 99, i64 76, i64 56, i64 32, i64 14, i64 13, i64 46, i64 12, i64 42, i64 89, i64 0, i64 89, i64 23, i64 13, i64 46, i64 1, i64 5, i64 59, i64 22, i64 92, i64 89, i64 53, i64 60, i64 12, i64 67, i64 44, i64 4, i64 92, i64 57, i64 74, i64 94], [100 x i64] [i64 55, i64 15, i64 15, i64 53, i64 30, i64 28, i64 99, i64 8, i64 71, i64 88, i64 75, i64 59, i64 77, i64 88, i64 4, i64 44, i64 93, i64 29, i64 66, i64 51, i64 17, i64 85, i64 10, i64 96, i64 17, i64 54, i64 100, i64 8, i64 77, i64 73, i64 2, i64 31, i64 89, i64 17, i64 50, i64 85, i64 46, i64 48, i64 93, i64 83, i64 35, i64 67, i64 7, i64 11, i64 54, i64 78, i64 21, i64 13, i64 7, i64 88, i64 64, i64 91, i64 38, i64 74, i64 87, i64 56, i64 94, i64 86, i64 64, i64 70, i64 25, i64 32, i64 67, i64 80, i64 50, i64 16, i64 64, i64 62, i64 30, i64 56, i64 10, i64 32, i64 89, i64 17, i64 9, i64 8, i64 95, i64 31, i64 21, i64 68, i64 18, i64 85, i64 59, i64 22, i64 24, i64 11, i64 78, i64 84, i64 97, i64 42, i64 19, i64 88, i64 40, i64 86, i64 67, i64 90, i64 68, i64 30, i64 17, i64 99], [100 x i64] [i64 52, i64 27, i64 30, i64 40, i64 44, i64 5, i64 49, i64 5, i64 36, i64 70, i64 73, i64 20, i64 21, i64 31, i64 43, i64 11, i64 42, i64 20, i64 96, i64 5, i64 28, i64 14, i64 93, i64 69, i64 67, i64 26, i64 24, i64 34, i64 56, i64 8, i64 99, i64 75, i64 35, i64 95, i64 14, i64 46, i64 0, i64 29, i64 51, i64 36, i64 66, i64 23, i64 57, i64 87, i64 21, i64 100, i64 98, i64 29, i64 86, i64 59, i64 0, i64 81, i64 74, i64 60, i64 15, i64 40, i64 86, i64 39, i64 40, i64 7, i64 47, i64 5, i64 82, i64 49, i64 100, i64 63, i64 95, i64 66, i64 92, i64 11, i64 2, i64 57, i64 0, i64 25, i64 9, i64 21, i64 91, i64 74, i64 17, i64 76, i64 32, i64 17, i64 22, i64 72, 
i64 43, i64 37, i64 78, i64 28, i64 77, i64 18, i64 36, i64 90, i64 90, i64 84, i64 38, i64 89, i64 46, i64 99, i64 21, i64 4], [100 x i64] [i64 9, i64 90, i64 27, i64 10, i64 14, i64 3, i64 98, i64 4, i64 77, i64 14, i64 46, i64 75, i64 99, i64 35, i64 47, i64 41, i64 72, i64 24, i64 70, i64 48, i64 8, i64 72, i64 4, i64 98, i64 55, i64 42, i64 53, i64 68, i64 7, i64 74, i64 72, i64 16, i64 63, i64 99, i64 26, i64 43, i64 1, i64 24, i64 13, i64 44, i64 4, i64 25, i64 19, i64 2, i64 60, i64 32, i64 10, i64 32, i64 22, i64 80, i64 46, i64 98, i64 17, i64 50, i64 95, i64 38, i64 59, i64 13, i64 5, i64 66, i64 87, i64 77, i64 48, i64 15, i64 42, i64 41, i64 58, i64 9, i64 31, i64 71, i64 54, i64 35, i64 97, i64 39, i64 4, i64 56, i64 37, i64 14, i64 88, i64 59, i64 60, i64 0, i64 56, i64 77, i64 50, i64 17, i64 81, i64 75, i64 30, i64 87, i64 6, i64 84, i64 29, i64 55, i64 99, i64 37, i64 96, i64 57, i64 47, i64 26], [100 x i64] [i64 94, i64 67, i64 27, i64 56, i64 5, i64 98, i64 12, i64 8, i64 11, i64 66, i64 67, i64 37, i64 66, i64 90, i64 80, i64 83, i64 6, i64 61, i64 23, i64 2, i64 47, i64 30, i64 86, i64 42, i64 51, i64 51, i64 80, i64 46, i64 74, i64 26, i64 38, i64 67, i64 59, i64 31, i64 23, i64 64, i64 29, i64 1, i64 38, i64 6, i64 33, i64 4, i64 44, i64 100, i64 60, i64 90, i64 48, i64 32, i64 50, i64 71, i64 1, i64 63, i64 67, i64 87, i64 5, i64 17, i64 3, i64 51, i64 29, i64 77, i64 77, i64 33, i64 10, i64 35, i64 65, i64 100, i64 65, i64 60, i64 0, i64 2, i64 32, i64 33, i64 73, i64 42, i64 99, i64 100, i64 32, i64 12, i64 31, i64 48, i64 84, i64 99, i64 11, i64 50, i64 86, i64 83, i64 34, i64 55, i64 33, i64 63, i64 32, i64 76, i64 97, i64 8, i64 77, i64 27, i64 7, i64 7, i64 53, i64 74], [100 x i64] [i64 76, i64 85, i64 73, i64 14, i64 27, i64 72, i64 13, i64 59, i64 50, i64 11, i64 73, i64 33, i64 9, i64 84, i64 50, i64 61, i64 32, i64 84, i64 16, i64 31, i64 12, i64 14, i64 6, i64 8, i64 89, i64 49, i64 1, i64 96, i64 56, i64 54, i64 35, i64 31, i64 39, i64 7, i64 46, i64 32, i64 45, i64 59, i64 57, i64 96, i64 36, i64 29, i64 95, i64 46, i64 80, i64 10, i64 73, i64 11, i64 94, i64 89, i64 9, i64 73, i64 69, i64 15, i64 47, i64 57, i64 31, i64 49, i64 18, i64 87, i64 69, i64 53, i64 18, i64 74, i64 27, i64 30, i64 5, i64 38, i64 55, i64 28, i64 33, i64 92, i64 58, i64 95, i64 3, i64 37, i64 4, i64 76, i64 14, i64 65, i64 31, i64 23, i64 37, i64 66, i64 5, i64 50, i64 23, i64 36, i64 99, i64 41, i64 22, i64 68, i64 61, i64 6, i64 7, i64 88, i64 2, i64 13, i64 92, i64 58], [100 x i64] [i64 41, i64 92, i64 15, i64 65, i64 86, i64 18, i64 1, i64 56, i64 60, i64 83, i64 87, i64 57, i64 5, i64 90, i64 23, i64 10, i64 40, i64 12, i64 12, i64 38, i64 19, i64 35, i64 72, i64 80, i64 7, i64 80, i64 33, i64 10, i64 59, i64 25, i64 34, i64 66, i64 16, i64 49, i64 31, i64 68, i64 33, i64 99, i64 23, i64 59, i64 47, i64 10, i64 16, i64 53, i64 100, i64 5, i64 29, i64 39, i64 17, i64 42, i64 44, i64 2, i64 43, i64 82, i64 49, i64 16, i64 27, i64 82, i64 93, i64 86, i64 73, i64 26, i64 18, i64 55, i64 75, i64 49, i64 89, i64 7, i64 13, i64 79, i64 33, i64 61, i64 55, i64 15, i64 80, i64 20, i64 20, i64 75, i64 60, i64 3, i64 83, i64 70, i64 5, i64 92, i64 17, i64 54, i64 8, i64 45, i64 2, i64 0, i64 30, i64 41, i64 27, i64 14, i64 63, i64 68, i64 29, i64 51, i64 42, i64 43], [100 x i64] [i64 96, i64 75, i64 70, i64 50, i64 90, i64 49, i64 71, i64 9, i64 90, i64 97, i64 79, i64 73, i64 66, i64 50, i64 64, i64 83, i64 4, i64 72, i64 27, i64 73, i64 39, i64 24, i64 80, i64 32, i64 4, i64 42, 
i64 100, i64 34, i64 60, i64 41, i64 43, i64 55, i64 82, i64 12, i64 5, i64 71, i64 27, i64 42, i64 46, i64 16, i64 38, i64 24, i64 89, i64 3, i64 41, i64 19, i64 52, i64 11, i64 57, i64 46, i64 84, i64 96, i64 36, i64 29, i64 27, i64 40, i64 72, i64 94, i64 40, i64 98, i64 0, i64 83, i64 18, i64 83, i64 95, i64 90, i64 53, i64 88, i64 31, i64 66, i64 71, i64 69, i64 56, i64 59, i64 38, i64 97, i64 44, i64 57, i64 7, i64 1, i64 2, i64 57, i64 97, i64 4, i64 87, i64 91, i64 10, i64 24, i64 84, i64 51, i64 21, i64 84, i64 33, i64 39, i64 66, i64 95, i64 96, i64 86, i64 82, i64 26], [100 x i64] [i64 51, i64 52, i64 96, i64 73, i64 78, i64 33, i64 70, i64 21, i64 90, i64 77, i64 89, i64 58, i64 0, i64 86, i64 28, i64 87, i64 42, i64 39, i64 10, i64 25, i64 56, i64 98, i64 75, i64 89, i64 2, i64 7, i64 49, i64 98, i64 59, i64 98, i64 24, i64 76, i64 15, i64 86, i64 48, i64 59, i64 18, i64 17, i64 81, i64 75, i64 61, i64 69, i64 99, i64 61, i64 20, i64 27, i64 13, i64 62, i64 32, i64 90, i64 53, i64 88, i64 87, i64 95, i64 42, i64 89, i64 1, i64 58, i64 53, i64 60, i64 55, i64 43, i64 1, i64 70, i64 28, i64 49, i64 29, i64 12, i64 33, i64 76, i64 53, i64 60, i64 10, i64 52, i64 87, i64 98, i64 45, i64 100, i64 25, i64 43, i64 89, i64 79, i64 97, i64 41, i64 73, i64 4, i64 96, i64 40, i64 62, i64 48, i64 66, i64 16, i64 91, i64 67, i64 53, i64 85, i64 82, i64 48, i64 98, i64 14], [100 x i64] [i64 90, i64 50, i64 74, i64 66, i64 68, i64 26, i64 63, i64 12, i64 25, i64 89, i64 55, i64 80, i64 33, i64 17, i64 20, i64 72, i64 22, i64 83, i64 11, i64 84, i64 30, i64 77, i64 67, i64 88, i64 9, i64 86, i64 72, i64 91, i64 33, i64 35, i64 72, i64 89, i64 86, i64 11, i64 54, i64 53, i64 38, i64 17, i64 32, i64 29, i64 72, i64 53, i64 76, i64 71, i64 71, i64 62, i64 42, i64 93, i64 44, i64 19, i64 76, i64 41, i64 62, i64 42, i64 28, i64 71, i64 27, i64 66, i64 27, i64 26, i64 1, i64 99, i64 14, i64 87, i64 10, i64 35, i64 5, i64 14, i64 52, i64 37, i64 43, i64 90, i64 91, i64 18, i64 60, i64 27, i64 81, i64 68, i64 19, i64 24, i64 87, i64 95, i64 31, i64 48, i64 3, i64 59, i64 18, i64 97, i64 92, i64 11, i64 90, i64 93, i64 10, i64 70, i64 45, i64 20, i64 4, i64 16, i64 34, i64 22], [100 x i64] [i64 54, i64 43, i64 11, i64 10, i64 62, i64 37, i64 37, i64 8, i64 4, i64 22, i64 99, i64 57, i64 83, i64 30, i64 4, i64 86, i64 55, i64 89, i64 49, i64 46, i64 0, i64 38, i64 38, i64 77, i64 74, i64 49, i64 97, i64 79, i64 66, i64 97, i64 0, i64 86, i64 5, i64 79, i64 62, i64 33, i64 15, i64 65, i64 41, i64 87, i64 87, i64 6, i64 9, i64 35, i64 2, i64 14, i64 21, i64 57, i64 69, i64 36, i64 3, i64 35, i64 40, i64 7, i64 11, i64 13, i64 23, i64 74, i64 92, i64 55, i64 36, i64 93, i64 40, i64 42, i64 37, i64 68, i64 75, i64 18, i64 32, i64 83, i64 71, i64 85, i64 89, i64 81, i64 19, i64 91, i64 61, i64 6, i64 13, i64 29, i64 8, i64 16, i64 65, i64 48, i64 91, i64 76, i64 62, i64 80, i64 16, i64 19, i64 34, i64 52, i64 78, i64 74, i64 94, i64 14, i64 7, i64 69, i64 33, i64 5], [100 x i64] [i64 17, i64 3, i64 56, i64 5, i64 84, i64 41, i64 62, i64 44, i64 48, i64 75, i64 40, i64 56, i64 58, i64 71, i64 71, i64 14, i64 12, i64 99, i64 94, i64 28, i64 17, i64 27, i64 81, i64 96, i64 67, i64 74, i64 76, i64 74, i64 8, i64 75, i64 45, i64 25, i64 79, i64 0, i64 97, i64 28, i64 41, i64 58, i64 39, i64 55, i64 100, i64 45, i64 11, i64 23, i64 15, i64 48, i64 37, i64 27, i64 46, i64 97, i64 56, i64 63, i64 90, i64 36, i64 24, i64 56, i64 76, i64 0, i64 96, i64 85, i64 41, i64 40, i64 9, i64 19, i64 6, i64 6, i64 14, i64 47, 
i64 30, i64 19, i64 2, i64 96, i64 64, i64 80, i64 18, i64 45, i64 27, i64 21, i64 72, i64 39, i64 17, i64 94, i64 1, i64 6, i64 96, i64 93, i64 28, i64 72, i64 59, i64 90, i64 56, i64 100, i64 96, i64 31, i64 86, i64 1, i64 3, i64 66, i64 15, i64 0], [100 x i64] [i64 85, i64 17, i64 96, i64 14, i64 63, i64 81, i64 59, i64 90, i64 1, i64 97, i64 28, i64 19, i64 57, i64 96, i64 92, i64 52, i64 54, i64 87, i64 23, i64 12, i64 76, i64 45, i64 79, i64 72, i64 43, i64 64, i64 39, i64 46, i64 29, i64 54, i64 12, i64 80, i64 37, i64 8, i64 60, i64 100, i64 89, i64 85, i64 55, i64 56, i64 47, i64 49, i64 75, i64 3, i64 45, i64 33, i64 56, i64 99, i64 19, i64 45, i64 78, i64 61, i64 91, i64 56, i64 99, i64 33, i64 86, i64 4, i64 45, i64 81, i64 58, i64 58, i64 60, i64 96, i64 32, i64 19, i64 61, i64 87, i64 70, i64 16, i64 42, i64 16, i64 65, i64 84, i64 20, i64 76, i64 83, i64 42, i64 41, i64 68, i64 87, i64 18, i64 28, i64 77, i64 40, i64 94, i64 76, i64 25, i64 98, i64 88, i64 5, i64 21, i64 11, i64 31, i64 16, i64 43, i64 16, i64 44, i64 29, i64 86], [100 x i64] [i64 60, i64 37, i64 1, i64 24, i64 20, i64 88, i64 67, i64 69, i64 29, i64 7, i64 36, i64 16, i64 25, i64 65, i64 59, i64 65, i64 24, i64 1, i64 56, i64 21, i64 89, i64 61, i64 42, i64 100, i64 58, i64 25, i64 8, i64 74, i64 69, i64 3, i64 25, i64 95, i64 40, i64 26, i64 85, i64 27, i64 81, i64 51, i64 96, i64 9, i64 58, i64 32, i64 25, i64 49, i64 63, i64 51, i64 80, i64 87, i64 52, i64 35, i64 74, i64 40, i64 62, i64 82, i64 5, i64 19, i64 73, i64 13, i64 59, i64 7, i64 16, i64 84, i64 1, i64 56, i64 77, i64 53, i64 49, i64 57, i64 3, i64 45, i64 66, i64 28, i64 43, i64 58, i64 77, i64 72, i64 8, i64 57, i64 58, i64 60, i64 92, i64 98, i64 66, i64 20, i64 79, i64 71, i64 39, i64 52, i64 84, i64 65, i64 59, i64 100, i64 48, i64 27, i64 21, i64 91, i64 80, i64 71, i64 47, i64 83], [100 x i64] [i64 82, i64 80, i64 10, i64 24, i64 37, i64 54, i64 62, i64 45, i64 10, i64 86, i64 71, i64 68, i64 83, i64 36, i64 88, i64 27, i64 6, i64 94, i64 79, i64 56, i64 58, i64 4, i64 55, i64 72, i64 98, i64 42, i64 63, i64 77, i64 12, i64 9, i64 25, i64 60, i64 89, i64 2, i64 50, i64 92, i64 56, i64 11, i64 2, i64 32, i64 97, i64 73, i64 100, i64 79, i64 75, i64 88, i64 73, i64 47, i64 47, i64 17, i64 2, i64 4, i64 21, i64 23, i64 42, i64 18, i64 66, i64 4, i64 61, i64 44, i64 81, i64 87, i64 71, i64 35, i64 89, i64 20, i64 27, i64 10, i64 32, i64 96, i64 42, i64 95, i64 69, i64 41, i64 40, i64 9, i64 95, i64 12, i64 23, i64 41, i64 29, i64 25, i64 11, i64 17, i64 15, i64 54, i64 1, i64 47, i64 24, i64 63, i64 57, i64 4, i64 49, i64 27, i64 40, i64 3, i64 48, i64 33, i64 13, i64 46], [100 x i64] [i64 95, i64 55, i64 40, i64 29, i64 96, i64 46, i64 39, i64 57, i64 58, i64 62, i64 98, i64 54, i64 53, i64 76, i64 71, i64 68, i64 29, i64 72, i64 81, i64 53, i64 34, i64 38, i64 24, i64 49, i64 65, i64 30, i64 52, i64 79, i64 29, i64 31, i64 24, i64 23, i64 86, i64 31, i64 53, i64 48, i64 77, i64 92, i64 4, i64 1, i64 19, i64 68, i64 55, i64 72, i64 9, i64 92, i64 6, i64 38, i64 63, i64 87, i64 58, i64 64, i64 24, i64 82, i64 79, i64 56, i64 78, i64 98, i64 34, i64 6, i64 28, i64 25, i64 29, i64 81, i64 22, i64 82, i64 28, i64 65, i64 39, i64 99, i64 66, i64 58, i64 32, i64 87, i64 97, i64 42, i64 78, i64 2, i64 46, i64 7, i64 55, i64 3, i64 71, i64 46, i64 51, i64 49, i64 1, i64 28, i64 46, i64 1, i64 34, i64 41, i64 26, i64 30, i64 21, i64 48, i64 11, i64 49, i64 80, i64 17], [100 x i64] [i64 13, i64 45, i64 75, i64 11, i64 99, i64 37, i64 53, i64 76, i64 
39, i64 66, i64 83, i64 95, i64 35, i64 19, i64 40, i64 87, i64 69, i64 7, i64 81, i64 81, i64 8, i64 82, i64 21, i64 35, i64 11, i64 42, i64 49, i64 89, i64 57, i64 95, i64 5, i64 36, i64 40, i64 47, i64 14, i64 38, i64 84, i64 33, i64 80, i64 23, i64 99, i64 29, i64 84, i64 34, i64 48, i64 90, i64 87, i64 16, i64 97, i64 67, i64 64, i64 71, i64 48, i64 51, i64 72, i64 59, i64 60, i64 88, i64 48, i64 83, i64 82, i64 53, i64 86, i64 21, i64 66, i64 100, i64 25, i64 50, i64 32, i64 72, i64 39, i64 31, i64 0, i64 22, i64 65, i64 48, i64 78, i64 51, i64 31, i64 40, i64 84, i64 61, i64 10, i64 32, i64 11, i64 83, i64 57, i64 71, i64 70, i64 4, i64 20, i64 51, i64 24, i64 5, i64 39, i64 90, i64 4, i64 30, i64 5, i64 36], [100 x i64] [i64 1, i64 44, i64 33, i64 68, i64 66, i64 64, i64 16, i64 9, i64 81, i64 13, i64 49, i64 65, i64 74, i64 60, i64 97, i64 51, i64 42, i64 19, i64 89, i64 11, i64 24, i64 8, i64 28, i64 14, i64 13, i64 67, i64 70, i64 84, i64 64, i64 76, i64 86, i64 65, i64 19, i64 19, i64 100, i64 52, i64 83, i64 15, i64 61, i64 64, i64 95, i64 10, i64 95, i64 34, i64 70, i64 57, i64 85, i64 78, i64 76, i64 73, i64 55, i64 66, i64 47, i64 83, i64 80, i64 60, i64 16, i64 16, i64 9, i64 80, i64 92, i64 96, i64 10, i64 77, i64 14, i64 9, i64 28, i64 63, i64 91, i64 56, i64 93, i64 85, i64 32, i64 87, i64 18, i64 68, i64 43, i64 70, i64 45, i64 19, i64 42, i64 66, i64 85, i64 56, i64 48, i64 31, i64 82, i64 30, i64 47, i64 92, i64 9, i64 4, i64 87, i64 87, i64 81, i64 67, i64 96, i64 76, i64 29, i64 87], [100 x i64] [i64 31, i64 89, i64 37, i64 63, i64 75, i64 22, i64 97, i64 85, i64 92, i64 41, i64 70, i64 100, i64 73, i64 20, i64 55, i64 20, i64 51, i64 37, i64 17, i64 64, i64 28, i64 93, i64 68, i64 81, i64 79, i64 15, i64 47, i64 75, i64 91, i64 42, i64 27, i64 88, i64 30, i64 64, i64 16, i64 72, i64 52, i64 12, i64 56, i64 43, i64 19, i64 25, i64 43, i64 92, i64 45, i64 64, i64 78, i64 63, i64 0, i64 95, i64 26, i64 95, i64 54, i64 61, i64 75, i64 32, i64 76, i64 88, i64 73, i64 32, i64 30, i64 66, i64 86, i64 26, i64 97, i64 1, i64 98, i64 48, i64 80, i64 19, i64 92, i64 99, i64 10, i64 0, i64 56, i64 56, i64 64, i64 33, i64 85, i64 65, i64 95, i64 77, i64 59, i64 48, i64 3, i64 0, i64 46, i64 45, i64 88, i64 19, i64 77, i64 84, i64 51, i64 62, i64 10, i64 47, i64 29, i64 74, i64 96, i64 8], [100 x i64] [i64 94, i64 53, i64 73, i64 3, i64 53, i64 28, i64 25, i64 16, i64 62, i64 76, i64 47, i64 22, i64 53, i64 73, i64 70, i64 22, i64 73, i64 15, i64 68, i64 60, i64 0, i64 10, i64 44, i64 52, i64 73, i64 54, i64 65, i64 68, i64 94, i64 60, i64 77, i64 53, i64 79, i64 15, i64 23, i64 31, i64 44, i64 48, i64 14, i64 72, i64 91, i64 27, i64 94, i64 9, i64 100, i64 29, i64 31, i64 72, i64 44, i64 99, i64 32, i64 11, i64 9, i64 76, i64 29, i64 48, i64 96, i64 94, i64 15, i64 55, i64 20, i64 58, i64 8, i64 99, i64 40, i64 31, i64 97, i64 84, i64 45, i64 77, i64 55, i64 35, i64 3, i64 14, i64 44, i64 3, i64 43, i64 42, i64 75, i64 87, i64 40, i64 73, i64 64, i64 15, i64 14, i64 93, i64 29, i64 76, i64 53, i64 11, i64 31, i64 73, i64 69, i64 39, i64 37, i64 8, i64 70, i64 100, i64 58, i64 81], [100 x i64] [i64 76, i64 79, i64 16, i64 80, i64 93, i64 26, i64 49, i64 35, i64 68, i64 23, i64 89, i64 75, i64 63, i64 18, i64 56, i64 77, i64 11, i64 86, i64 53, i64 30, i64 97, i64 84, i64 2, i64 31, i64 89, i64 5, i64 6, i64 24, i64 5, i64 64, i64 4, i64 47, i64 43, i64 87, i64 26, i64 1, i64 13, i64 41, i64 3, i64 47, i64 65, i64 92, i64 88, i64 94, i64 9, i64 44, i64 70, i64 87, i64 29, i64 
89, i64 16, i64 25, i64 72, i64 85, i64 56, i64 26, i64 57, i64 62, i64 50, i64 62, i64 93, i64 55, i64 8, i64 1, i64 7, i64 1, i64 2, i64 20, i64 42, i64 5, i64 34, i64 73, i64 63, i64 21, i64 66, i64 39, i64 31, i64 2, i64 25, i64 60, i64 91, i64 8, i64 51, i64 29, i64 59, i64 74, i64 55, i64 15, i64 1, i64 5, i64 77, i64 94, i64 26, i64 52, i64 95, i64 33, i64 19, i64 64, i64 20, i64 27], [100 x i64] [i64 35, i64 54, i64 0, i64 99, i64 41, i64 32, i64 37, i64 73, i64 34, i64 28, i64 99, i64 92, i64 2, i64 50, i64 20, i64 62, i64 23, i64 75, i64 77, i64 24, i64 46, i64 20, i64 85, i64 72, i64 38, i64 45, i64 72, i64 57, i64 75, i64 92, i64 84, i64 10, i64 11, i64 50, i64 75, i64 18, i64 83, i64 78, i64 91, i64 83, i64 72, i64 56, i64 74, i64 75, i64 72, i64 60, i64 36, i64 95, i64 1, i64 79, i64 85, i64 47, i64 99, i64 35, i64 19, i64 36, i64 47, i64 91, i64 59, i64 21, i64 48, i64 43, i64 31, i64 59, i64 59, i64 72, i64 77, i64 7, i64 49, i64 34, i64 91, i64 21, i64 56, i64 30, i64 96, i64 27, i64 57, i64 98, i64 88, i64 58, i64 76, i64 38, i64 4, i64 41, i64 74, i64 90, i64 43, i64 20, i64 46, i64 2, i64 7, i64 94, i64 11, i64 39, i64 18, i64 70, i64 77, i64 62, i64 78, i64 26], [100 x i64] [i64 62, i64 34, i64 47, i64 17, i64 30, i64 8, i64 10, i64 87, i64 72, i64 98, i64 44, i64 47, i64 1, i64 15, i64 54, i64 75, i64 4, i64 98, i64 61, i64 17, i64 100, i64 69, i64 10, i64 10, i64 74, i64 96, i64 46, i64 50, i64 23, i64 23, i64 42, i64 85, i64 23, i64 55, i64 68, i64 54, i64 29, i64 44, i64 40, i64 0, i64 41, i64 51, i64 14, i64 42, i64 66, i64 68, i64 84, i64 36, i64 31, i64 10, i64 53, i64 30, i64 45, i64 30, i64 6, i64 85, i64 25, i64 53, i64 1, i64 14, i64 42, i64 43, i64 65, i64 66, i64 65, i64 32, i64 86, i64 94, i64 42, i64 25, i64 95, i64 83, i64 42, i64 8, i64 91, i64 74, i64 42, i64 40, i64 10, i64 74, i64 51, i64 63, i64 70, i64 62, i64 59, i64 77, i64 47, i64 50, i64 96, i64 48, i64 64, i64 3, i64 57, i64 28, i64 35, i64 21, i64 26, i64 20, i64 15, i64 68], [100 x i64] [i64 12, i64 9, i64 16, i64 54, i64 84, i64 74, i64 28, i64 92, i64 13, i64 4, i64 65, i64 30, i64 33, i64 1, i64 93, i64 93, i64 78, i64 5, i64 42, i64 39, i64 53, i64 73, i64 42, i64 9, i64 0, i64 78, i64 98, i64 94, i64 98, i64 12, i64 61, i64 76, i64 88, i64 44, i64 30, i64 37, i64 17, i64 24, i64 28, i64 97, i64 28, i64 60, i64 27, i64 61, i64 27, i64 86, i64 53, i64 4, i64 91, i64 62, i64 9, i64 9, i64 34, i64 17, i64 85, i64 0, i64 61, i64 82, i64 94, i64 25, i64 60, i64 21, i64 0, i64 13, i64 65, i64 30, i64 50, i64 48, i64 54, i64 45, i64 44, i64 48, i64 71, i64 37, i64 9, i64 98, i64 89, i64 62, i64 68, i64 45, i64 23, i64 43, i64 54, i64 23, i64 60, i64 5, i64 24, i64 21, i64 87, i64 17, i64 12, i64 13, i64 4, i64 12, i64 26, i64 69, i64 9, i64 43, i64 83, i64 29], [100 x i64] [i64 88, i64 94, i64 78, i64 24, i64 30, i64 87, i64 21, i64 86, i64 14, i64 55, i64 30, i64 4, i64 98, i64 51, i64 27, i64 57, i64 56, i64 17, i64 44, i64 8, i64 35, i64 56, i64 21, i64 39, i64 69, i64 14, i64 75, i64 44, i64 57, i64 23, i64 73, i64 10, i64 16, i64 50, i64 34, i64 13, i64 2, i64 55, i64 99, i64 17, i64 9, i64 95, i64 21, i64 6, i64 45, i64 14, i64 29, i64 0, i64 32, i64 74, i64 9, i64 33, i64 96, i64 97, i64 38, i64 30, i64 10, i64 79, i64 74, i64 33, i64 2, i64 47, i64 43, i64 85, i64 63, i64 77, i64 98, i64 66, i64 98, i64 62, i64 83, i64 73, i64 57, i64 70, i64 45, i64 68, i64 50, i64 75, i64 69, i64 82, i64 14, i64 44, i64 81, i64 9, i64 6, i64 19, i64 40, i64 84, i64 64, i64 80, i64 16, i64 66, i64 26, 
i64 60, i64 51, i64 90, i64 36, i64 14, i64 55, i64 34], [100 x i64] [i64 43, i64 3, i64 73, i64 100, i64 73, i64 18, i64 67, i64 89, i64 93, i64 1, i64 37, i64 6, i64 11, i64 17, i64 82, i64 85, i64 2, i64 88, i64 68, i64 67, i64 68, i64 50, i64 99, i64 60, i64 9, i64 15, i64 49, i64 12, i64 30, i64 70, i64 12, i64 73, i64 73, i64 85, i64 38, i64 11, i64 2, i64 71, i64 67, i64 95, i64 39, i64 3, i64 67, i64 16, i64 20, i64 15, i64 0, i64 90, i64 69, i64 34, i64 22, i64 36, i64 85, i64 20, i64 63, i64 94, i64 36, i64 11, i64 72, i64 32, i64 48, i64 84, i64 71, i64 87, i64 69, i64 75, i64 65, i64 37, i64 11, i64 31, i64 99, i64 50, i64 34, i64 31, i64 33, i64 20, i64 46, i64 100, i64 76, i64 15, i64 34, i64 98, i64 17, i64 18, i64 18, i64 80, i64 78, i64 20, i64 58, i64 16, i64 18, i64 72, i64 100, i64 55, i64 58, i64 34, i64 96, i64 89, i64 72, i64 6], [100 x i64] [i64 86, i64 36, i64 23, i64 86, i64 67, i64 56, i64 6, i64 80, i64 21, i64 48, i64 61, i64 55, i64 46, i64 78, i64 39, i64 30, i64 24, i64 84, i64 50, i64 48, i64 100, i64 34, i64 19, i64 65, i64 89, i64 43, i64 100, i64 84, i64 32, i64 37, i64 56, i64 17, i64 73, i64 79, i64 3, i64 5, i64 0, i64 76, i64 85, i64 22, i64 23, i64 45, i64 43, i64 35, i64 23, i64 83, i64 65, i64 13, i64 32, i64 14, i64 61, i64 31, i64 14, i64 46, i64 96, i64 2, i64 89, i64 61, i64 52, i64 87, i64 64, i64 8, i64 4, i64 2, i64 53, i64 74, i64 8, i64 54, i64 15, i64 93, i64 42, i64 38, i64 4, i64 85, i64 40, i64 94, i64 67, i64 4, i64 6, i64 99, i64 86, i64 33, i64 96, i64 100, i64 79, i64 58, i64 69, i64 33, i64 85, i64 20, i64 20, i64 49, i64 95, i64 91, i64 17, i64 14, i64 64, i64 25, i64 68, i64 79], [100 x i64] [i64 85, i64 76, i64 83, i64 89, i64 60, i64 22, i64 82, i64 94, i64 27, i64 54, i64 58, i64 79, i64 87, i64 54, i64 78, i64 31, i64 78, i64 12, i64 64, i64 62, i64 100, i64 84, i64 10, i64 94, i64 74, i64 28, i64 7, i64 37, i64 19, i64 41, i64 82, i64 70, i64 16, i64 31, i64 58, i64 43, i64 19, i64 5, i64 36, i64 12, i64 59, i64 94, i64 91, i64 11, i64 13, i64 69, i64 42, i64 91, i64 81, i64 6, i64 53, i64 80, i64 90, i64 29, i64 40, i64 30, i64 23, i64 13, i64 33, i64 9, i64 21, i64 15, i64 79, i64 3, i64 12, i64 37, i64 46, i64 31, i64 8, i64 48, i64 44, i64 34, i64 42, i64 34, i64 45, i64 21, i64 69, i64 54, i64 12, i64 16, i64 60, i64 65, i64 96, i64 15, i64 60, i64 1, i64 45, i64 84, i64 82, i64 45, i64 93, i64 2, i64 60, i64 71, i64 5, i64 38, i64 74, i64 18, i64 69, i64 49], [100 x i64] [i64 66, i64 12, i64 83, i64 74, i64 47, i64 94, i64 96, i64 15, i64 47, i64 74, i64 31, i64 6, i64 4, i64 94, i64 89, i64 64, i64 61, i64 100, i64 13, i64 42, i64 44, i64 72, i64 44, i64 70, i64 9, i64 16, i64 7, i64 83, i64 34, i64 77, i64 98, i64 66, i64 55, i64 80, i64 40, i64 1, i64 74, i64 1, i64 84, i64 20, i64 41, i64 81, i64 94, i64 45, i64 40, i64 48, i64 8, i64 1, i64 47, i64 89, i64 43, i64 58, i64 60, i64 54, i64 27, i64 69, i64 36, i64 1, i64 18, i64 70, i64 44, i64 15, i64 1, i64 99, i64 96, i64 7, i64 0, i64 35, i64 75, i64 50, i64 21, i64 15, i64 30, i64 14, i64 60, i64 37, i64 62, i64 35, i64 38, i64 76, i64 23, i64 47, i64 33, i64 49, i64 67, i64 60, i64 18, i64 2, i64 27, i64 2, i64 38, i64 71, i64 17, i64 6, i64 70, i64 79, i64 13, i64 36, i64 80, i64 89], [100 x i64] [i64 86, i64 1, i64 3, i64 82, i64 15, i64 30, i64 18, i64 44, i64 31, i64 22, i64 19, i64 54, i64 36, i64 52, i64 69, i64 69, i64 78, i64 53, i64 72, i64 5, i64 55, i64 76, i64 42, i64 73, i64 82, i64 11, i64 17, i64 62, i64 47, i64 98, i64 50, i64 99, i64 99, i64 
19, i64 81, i64 80, i64 15, i64 65, i64 23, i64 46, i64 54, i64 8, i64 66, i64 56, i64 60, i64 35, i64 24, i64 4, i64 88, i64 62, i64 76, i64 43, i64 38, i64 17, i64 82, i64 86, i64 29, i64 65, i64 47, i64 42, i64 62, i64 63, i64 41, i64 26, i64 49, i64 88, i64 6, i64 64, i64 18, i64 96, i64 10, i64 72, i64 4, i64 42, i64 94, i64 64, i64 77, i64 18, i64 34, i64 31, i64 80, i64 9, i64 40, i64 84, i64 27, i64 21, i64 70, i64 22, i64 86, i64 83, i64 64, i64 14, i64 46, i64 4, i64 40, i64 61, i64 92, i64 46, i64 24, i64 10], [100 x i64] [i64 42, i64 0, i64 48, i64 12, i64 9, i64 42, i64 76, i64 86, i64 26, i64 77, i64 83, i64 5, i64 86, i64 22, i64 56, i64 79, i64 43, i64 92, i64 0, i64 96, i64 40, i64 65, i64 76, i64 52, i64 35, i64 15, i64 12, i64 94, i64 28, i64 3, i64 3, i64 36, i64 3, i64 17, i64 48, i64 79, i64 25, i64 90, i64 65, i64 51, i64 66, i64 47, i64 23, i64 18, i64 36, i64 79, i64 97, i64 79, i64 36, i64 98, i64 40, i64 76, i64 28, i64 15, i64 28, i64 63, i64 98, i64 40, i64 56, i64 25, i64 43, i64 25, i64 27, i64 13, i64 9, i64 75, i64 92, i64 34, i64 30, i64 22, i64 86, i64 97, i64 36, i64 75, i64 81, i64 72, i64 19, i64 77, i64 16, i64 55, i64 40, i64 23, i64 97, i64 68, i64 4, i64 24, i64 31, i64 1, i64 31, i64 53, i64 93, i64 40, i64 79, i64 19, i64 19, i64 88, i64 60, i64 78, i64 88, i64 91], [100 x i64] [i64 66, i64 39, i64 53, i64 1, i64 13, i64 33, i64 39, i64 32, i64 76, i64 22, i64 53, i64 16, i64 11, i64 16, i64 84, i64 15, i64 40, i64 81, i64 17, i64 37, i64 34, i64 76, i64 44, i64 79, i64 96, i64 63, i64 32, i64 21, i64 6, i64 86, i64 11, i64 73, i64 25, i64 30, i64 40, i64 4, i64 29, i64 46, i64 3, i64 5, i64 68, i64 56, i64 21, i64 79, i64 72, i64 71, i64 60, i64 79, i64 18, i64 77, i64 82, i64 52, i64 53, i64 25, i64 97, i64 14, i64 55, i64 95, i64 35, i64 61, i64 80, i64 13, i64 33, i64 4, i64 9, i64 74, i64 9, i64 39, i64 19, i64 12, i64 10, i64 53, i64 34, i64 98, i64 98, i64 73, i64 68, i64 57, i64 17, i64 52, i64 0, i64 99, i64 3, i64 19, i64 24, i64 66, i64 100, i64 79, i64 60, i64 34, i64 39, i64 40, i64 13, i64 39, i64 44, i64 23, i64 79, i64 19, i64 28, i64 64], [100 x i64] [i64 98, i64 38, i64 16, i64 32, i64 35, i64 80, i64 71, i64 69, i64 36, i64 88, i64 21, i64 2, i64 86, i64 91, i64 21, i64 76, i64 57, i64 87, i64 20, i64 83, i64 21, i64 26, i64 22, i64 0, i64 65, i64 33, i64 90, i64 9, i64 18, i64 17, i64 73, i64 16, i64 55, i64 55, i64 14, i64 56, i64 34, i64 85, i64 92, i64 36, i64 38, i64 79, i64 5, i64 90, i64 35, i64 93, i64 66, i64 58, i64 80, i64 86, i64 41, i64 67, i64 78, i64 29, i64 67, i64 8, i64 62, i64 57, i64 17, i64 47, i64 74, i64 90, i64 63, i64 96, i64 44, i64 43, i64 17, i64 44, i64 27, i64 75, i64 47, i64 65, i64 53, i64 52, i64 54, i64 55, i64 10, i64 86, i64 12, i64 90, i64 38, i64 53, i64 56, i64 15, i64 49, i64 23, i64 24, i64 77, i64 46, i64 41, i64 23, i64 19, i64 98, i64 86, i64 81, i64 7, i64 95, i64 65, i64 18, i64 21], [100 x i64] [i64 39, i64 31, i64 52, i64 59, i64 49, i64 73, i64 13, i64 59, i64 24, i64 25, i64 49, i64 62, i64 45, i64 4, i64 44, i64 60, i64 94, i64 34, i64 36, i64 39, i64 41, i64 60, i64 25, i64 4, i64 11, i64 72, i64 12, i64 6, i64 36, i64 97, i64 94, i64 76, i64 27, i64 12, i64 34, i64 76, i64 85, i64 13, i64 34, i64 75, i64 4, i64 83, i64 3, i64 49, i64 54, i64 47, i64 8, i64 47, i64 47, i64 11, i64 53, i64 88, i64 71, i64 44, i64 59, i64 48, i64 15, i64 71, i64 54, i64 52, i64 67, i64 14, i64 27, i64 94, i64 26, i64 27, i64 69, i64 77, i64 6, i64 69, i64 51, i64 10, i64 52, i64 54, i64 26, i64 
72, i64 67, i64 0, i64 85, i64 80, i64 11, i64 37, i64 34, i64 48, i64 81, i64 93, i64 97, i64 97, i64 29, i64 16, i64 14, i64 96, i64 30, i64 7, i64 55, i64 56, i64 34, i64 90, i64 99, i64 6], [100 x i64] [i64 58, i64 50, i64 16, i64 76, i64 70, i64 8, i64 47, i64 3, i64 9, i64 32, i64 49, i64 87, i64 69, i64 83, i64 35, i64 16, i64 75, i64 98, i64 79, i64 3, i64 13, i64 93, i64 65, i64 44, i64 100, i64 86, i64 66, i64 100, i64 75, i64 65, i64 5, i64 33, i64 81, i64 88, i64 75, i64 16, i64 97, i64 22, i64 86, i64 72, i64 54, i64 35, i64 58, i64 89, i64 17, i64 59, i64 71, i64 59, i64 56, i64 49, i64 28, i64 70, i64 41, i64 60, i64 80, i64 40, i64 45, i64 11, i64 5, i64 20, i64 42, i64 10, i64 19, i64 22, i64 99, i64 94, i64 5, i64 61, i64 82, i64 91, i64 32, i64 1, i64 25, i64 90, i64 57, i64 9, i64 49, i64 27, i64 34, i64 71, i64 43, i64 62, i64 40, i64 50, i64 21, i64 86, i64 91, i64 33, i64 98, i64 62, i64 53, i64 39, i64 73, i64 38, i64 28, i64 37, i64 98, i64 33, i64 98, i64 80], [100 x i64] [i64 90, i64 29, i64 47, i64 82, i64 85, i64 3, i64 57, i64 100, i64 98, i64 91, i64 71, i64 40, i64 18, i64 77, i64 90, i64 6, i64 63, i64 46, i64 39, i64 26, i64 8, i64 58, i64 31, i64 47, i64 96, i64 59, i64 84, i64 59, i64 58, i64 47, i64 38, i64 48, i64 76, i64 52, i64 96, i64 26, i64 55, i64 52, i64 26, i64 52, i64 42, i64 63, i64 58, i64 26, i64 5, i64 48, i64 32, i64 68, i64 60, i64 37, i64 60, i64 68, i64 95, i64 92, i64 14, i64 56, i64 16, i64 64, i64 15, i64 75, i64 10, i64 19, i64 89, i64 52, i64 71, i64 84, i64 79, i64 26, i64 1, i64 71, i64 44, i64 43, i64 100, i64 2, i64 35, i64 4, i64 16, i64 68, i64 39, i64 76, i64 4, i64 99, i64 10, i64 100, i64 56, i64 91, i64 21, i64 73, i64 55, i64 36, i64 13, i64 31, i64 56, i64 1, i64 84, i64 93, i64 51, i64 28, i64 85, i64 52], [100 x i64] [i64 65, i64 29, i64 61, i64 64, i64 98, i64 96, i64 68, i64 13, i64 29, i64 73, i64 55, i64 34, i64 38, i64 65, i64 100, i64 94, i64 56, i64 87, i64 32, i64 77, i64 23, i64 45, i64 7, i64 45, i64 12, i64 91, i64 37, i64 29, i64 85, i64 22, i64 47, i64 49, i64 17, i64 74, i64 12, i64 14, i64 70, i64 47, i64 94, i64 65, i64 86, i64 48, i64 99, i64 23, i64 13, i64 64, i64 84, i64 35, i64 51, i64 15, i64 11, i64 40, i64 27, i64 18, i64 51, i64 5, i64 76, i64 88, i64 1, i64 26, i64 76, i64 48, i64 76, i64 59, i64 22, i64 54, i64 73, i64 58, i64 67, i64 32, i64 22, i64 53, i64 81, i64 88, i64 76, i64 60, i64 17, i64 25, i64 95, i64 34, i64 7, i64 5, i64 40, i64 34, i64 90, i64 91, i64 5, i64 31, i64 45, i64 6, i64 58, i64 20, i64 21, i64 33, i64 80, i64 9, i64 53, i64 18, i64 67, i64 20], [100 x i64] [i64 51, i64 55, i64 73, i64 31, i64 42, i64 14, i64 57, i64 26, i64 40, i64 51, i64 60, i64 13, i64 22, i64 0, i64 47, i64 78, i64 91, i64 18, i64 9, i64 1, i64 92, i64 33, i64 22, i64 79, i64 32, i64 68, i64 88, i64 85, i64 86, i64 20, i64 71, i64 2, i64 75, i64 43, i64 100, i64 84, i64 24, i64 56, i64 9, i64 30, i64 6, i64 35, i64 43, i64 95, i64 1, i64 56, i64 73, i64 59, i64 40, i64 48, i64 60, i64 31, i64 81, i64 82, i64 9, i64 12, i64 15, i64 97, i64 63, i64 1, i64 83, i64 34, i64 70, i64 58, i64 43, i64 70, i64 41, i64 67, i64 25, i64 16, i64 63, i64 99, i64 17, i64 5, i64 93, i64 19, i64 27, i64 31, i64 78, i64 68, i64 79, i64 37, i64 99, i64 59, i64 86, i64 75, i64 37, i64 0, i64 37, i64 67, i64 68, i64 20, i64 0, i64 38, i64 78, i64 43, i64 7, i64 85, i64 77, i64 99], [100 x i64] [i64 67, i64 39, i64 97, i64 84, i64 11, i64 90, i64 2, i64 38, i64 20, i64 46, i64 5, i64 100, i64 50, i64 71, i64 24, i64 
35, i64 45, i64 28, i64 1, i64 82, i64 95, i64 36, i64 68, i64 61, i64 40, i64 11, i64 70, i64 47, i64 62, i64 46, i64 11, i64 28, i64 52, i64 8, i64 79, i64 63, i64 98, i64 81, i64 67, i64 84, i64 94, i64 39, i64 49, i64 43, i64 9, i64 40, i64 78, i64 20, i64 68, i64 45, i64 68, i64 28, i64 81, i64 36, i64 89, i64 20, i64 47, i64 58, i64 33, i64 9, i64 71, i64 45, i64 37, i64 22, i64 53, i64 82, i64 51, i64 16, i64 29, i64 84, i64 100, i64 22, i64 22, i64 15, i64 65, i64 98, i64 55, i64 8, i64 17, i64 22, i64 19, i64 86, i64 16, i64 0, i64 21, i64 4, i64 87, i64 34, i64 28, i64 20, i64 43, i64 99, i64 31, i64 47, i64 87, i64 50, i64 28, i64 3, i64 66, i64 57], [100 x i64] [i64 88, i64 31, i64 45, i64 76, i64 46, i64 9, i64 74, i64 0, i64 84, i64 91, i64 89, i64 3, i64 42, i64 4, i64 3, i64 63, i64 8, i64 56, i64 98, i64 3, i64 76, i64 6, i64 1, i64 73, i64 53, i64 55, i64 22, i64 48, i64 58, i64 54, i64 71, i64 11, i64 86, i64 16, i64 88, i64 98, i64 92, i64 61, i64 99, i64 76, i64 17, i64 53, i64 79, i64 60, i64 58, i64 48, i64 89, i64 32, i64 3, i64 52, i64 35, i64 46, i64 59, i64 3, i64 18, i64 78, i64 24, i64 7, i64 92, i64 48, i64 61, i64 63, i64 60, i64 12, i64 79, i64 47, i64 10, i64 70, i64 74, i64 75, i64 11, i64 91, i64 27, i64 90, i64 16, i64 51, i64 3, i64 5, i64 84, i64 74, i64 57, i64 85, i64 19, i64 15, i64 54, i64 3, i64 60, i64 44, i64 10, i64 51, i64 93, i64 38, i64 13, i64 52, i64 50, i64 58, i64 65, i64 60, i64 28, i64 38], [100 x i64] [i64 34, i64 39, i64 95, i64 28, i64 96, i64 11, i64 79, i64 99, i64 16, i64 28, i64 38, i64 73, i64 80, i64 57, i64 55, i64 100, i64 27, i64 14, i64 44, i64 3, i64 65, i64 36, i64 41, i64 79, i64 54, i64 92, i64 2, i64 18, i64 17, i64 30, i64 56, i64 18, i64 36, i64 50, i64 46, i64 98, i64 27, i64 24, i64 62, i64 43, i64 19, i64 0, i64 83, i64 99, i64 23, i64 37, i64 98, i64 50, i64 51, i64 41, i64 20, i64 82, i64 43, i64 61, i64 26, i64 97, i64 18, i64 29, i64 14, i64 2, i64 25, i64 36, i64 20, i64 61, i64 53, i64 66, i64 24, i64 80, i64 56, i64 87, i64 90, i64 41, i64 87, i64 72, i64 39, i64 9, i64 8, i64 3, i64 26, i64 25, i64 44, i64 46, i64 73, i64 54, i64 73, i64 100, i64 50, i64 58, i64 95, i64 31, i64 60, i64 19, i64 67, i64 80, i64 47, i64 86, i64 11, i64 71, i64 32, i64 33], [100 x i64] [i64 23, i64 21, i64 75, i64 9, i64 93, i64 80, i64 86, i64 67, i64 83, i64 11, i64 58, i64 94, i64 23, i64 30, i64 47, i64 96, i64 96, i64 63, i64 19, i64 56, i64 94, i64 79, i64 42, i64 27, i64 24, i64 89, i64 12, i64 1, i64 25, i64 44, i64 35, i64 49, i64 65, i64 76, i64 58, i64 23, i64 21, i64 9, i64 90, i64 4, i64 87, i64 13, i64 64, i64 9, i64 10, i64 77, i64 72, i64 72, i64 39, i64 91, i64 28, i64 33, i64 70, i64 70, i64 60, i64 60, i64 24, i64 72, i64 62, i64 49, i64 83, i64 63, i64 64, i64 47, i64 4, i64 89, i64 37, i64 25, i64 98, i64 26, i64 96, i64 85, i64 6, i64 25, i64 94, i64 16, i64 1, i64 31, i64 54, i64 41, i64 22, i64 48, i64 74, i64 58, i64 17, i64 100, i64 17, i64 7, i64 71, i64 45, i64 57, i64 19, i64 74, i64 20, i64 67, i64 78, i64 75, i64 3, i64 70, i64 73], [100 x i64] [i64 96, i64 65, i64 57, i64 68, i64 57, i64 16, i64 50, i64 58, i64 14, i64 4, i64 99, i64 36, i64 52, i64 38, i64 60, i64 36, i64 37, i64 43, i64 43, i64 75, i64 89, i64 66, i64 94, i64 62, i64 53, i64 60, i64 6, i64 27, i64 29, i64 76, i64 100, i64 92, i64 6, i64 22, i64 59, i64 63, i64 5, i64 9, i64 21, i64 19, i64 13, i64 86, i64 21, i64 31, i64 24, i64 47, i64 67, i64 61, i64 90, i64 10, i64 35, i64 44, i64 42, i64 29, i64 73, i64 95, i64 55, i64 
79, i64 22, i64 51, i64 54, i64 88, i64 42, i64 26, i64 10, i64 0, i64 56, i64 82, i64 9, i64 77, i64 67, i64 89, i64 28, i64 88, i64 20, i64 52, i64 34, i64 53, i64 80, i64 90, i64 29, i64 14, i64 34, i64 72, i64 9, i64 6, i64 66, i64 65, i64 85, i64 54, i64 82, i64 4, i64 42, i64 23, i64 97, i64 18, i64 23, i64 52, i64 100, i64 100], [100 x i64] [i64 95, i64 66, i64 54, i64 23, i64 19, i64 40, i64 75, i64 19, i64 60, i64 20, i64 8, i64 89, i64 35, i64 42, i64 60, i64 10, i64 48, i64 93, i64 41, i64 99, i64 46, i64 22, i64 69, i64 54, i64 45, i64 66, i64 38, i64 35, i64 17, i64 37, i64 0, i64 12, i64 69, i64 54, i64 35, i64 54, i64 61, i64 76, i64 73, i64 20, i64 97, i64 48, i64 8, i64 98, i64 90, i64 35, i64 7, i64 4, i64 94, i64 15, i64 69, i64 5, i64 37, i64 38, i64 60, i64 83, i64 3, i64 98, i64 84, i64 20, i64 1, i64 84, i64 99, i64 36, i64 3, i64 100, i64 57, i64 64, i64 76, i64 96, i64 50, i64 38, i64 43, i64 25, i64 35, i64 100, i64 60, i64 8, i64 70, i64 53, i64 23, i64 38, i64 58, i64 27, i64 42, i64 84, i64 76, i64 11, i64 48, i64 59, i64 99, i64 15, i64 8, i64 97, i64 51, i64 11, i64 97, i64 7, i64 42, i64 38], [100 x i64] [i64 70, i64 58, i64 76, i64 12, i64 83, i64 77, i64 11, i64 42, i64 51, i64 47, i64 61, i64 75, i64 86, i64 86, i64 68, i64 94, i64 69, i64 43, i64 5, i64 16, i64 1, i64 3, i64 31, i64 9, i64 100, i64 49, i64 87, i64 62, i64 22, i64 95, i64 100, i64 92, i64 53, i64 41, i64 71, i64 35, i64 17, i64 48, i64 44, i64 69, i64 96, i64 4, i64 9, i64 47, i64 56, i64 77, i64 40, i64 25, i64 86, i64 45, i64 7, i64 87, i64 48, i64 5, i64 62, i64 14, i64 20, i64 48, i64 76, i64 8, i64 43, i64 76, i64 67, i64 62, i64 16, i64 37, i64 97, i64 0, i64 85, i64 6, i64 35, i64 80, i64 78, i64 10, i64 26, i64 33, i64 53, i64 33, i64 24, i64 38, i64 78, i64 32, i64 24, i64 93, i64 3, i64 52, i64 6, i64 90, i64 100, i64 48, i64 98, i64 8, i64 90, i64 64, i64 70, i64 6, i64 67, i64 33, i64 73, i64 52], [100 x i64] [i64 39, i64 7, i64 98, i64 16, i64 84, i64 91, i64 16, i64 36, i64 23, i64 40, i64 74, i64 67, i64 38, i64 64, i64 59, i64 41, i64 15, i64 31, i64 97, i64 81, i64 80, i64 61, i64 56, i64 35, i64 24, i64 25, i64 41, i64 92, i64 24, i64 80, i64 9, i64 30, i64 53, i64 6, i64 12, i64 36, i64 97, i64 28, i64 72, i64 86, i64 69, i64 11, i64 53, i64 6, i64 75, i64 78, i64 14, i64 56, i64 76, i64 10, i64 37, i64 55, i64 37, i64 93, i64 56, i64 62, i64 84, i64 98, i64 19, i64 75, i64 43, i64 28, i64 4, i64 97, i64 0, i64 83, i64 32, i64 98, i64 11, i64 71, i64 49, i64 80, i64 82, i64 1, i64 52, i64 23, i64 80, i64 66, i64 45, i64 55, i64 43, i64 48, i64 76, i64 80, i64 40, i64 31, i64 7, i64 91, i64 95, i64 93, i64 31, i64 38, i64 20, i64 1, i64 0, i64 88, i64 84, i64 32, i64 51, i64 95], [100 x i64] [i64 2, i64 100, i64 40, i64 85, i64 1, i64 59, i64 74, i64 47, i64 91, i64 18, i64 68, i64 33, i64 67, i64 9, i64 80, i64 73, i64 6, i64 53, i64 29, i64 1, i64 46, i64 60, i64 5, i64 32, i64 61, i64 5, i64 86, i64 11, i64 3, i64 36, i64 72, i64 6, i64 36, i64 12, i64 57, i64 37, i64 71, i64 97, i64 50, i64 61, i64 14, i64 17, i64 61, i64 47, i64 93, i64 6, i64 20, i64 99, i64 25, i64 15, i64 66, i64 37, i64 76, i64 71, i64 36, i64 2, i64 42, i64 21, i64 80, i64 12, i64 58, i64 52, i64 18, i64 94, i64 30, i64 41, i64 97, i64 67, i64 3, i64 12, i64 94, i64 17, i64 96, i64 54, i64 31, i64 88, i64 26, i64 51, i64 86, i64 18, i64 66, i64 52, i64 55, i64 7, i64 89, i64 91, i64 77, i64 98, i64 79, i64 56, i64 9, i64 36, i64 74, i64 94, i64 96, i64 3, i64 34, i64 92, i64 70, i64 37], [100 x 
i64] [i64 3, i64 64, i64 20, i64 65, i64 84, i64 51, i64 52, i64 77, i64 68, i64 37, i64 95, i64 0, i64 55, i64 15, i64 7, i64 10, i64 6, i64 50, i64 7, i64 85, i64 73, i64 16, i64 87, i64 46, i64 9, i64 82, i64 50, i64 9, i64 39, i64 86, i64 12, i64 8, i64 49, i64 32, i64 73, i64 100, i64 50, i64 24, i64 76, i64 17, i64 27, i64 70, i64 17, i64 83, i64 51, i64 92, i64 93, i64 23, i64 7, i64 66, i64 74, i64 80, i64 82, i64 60, i64 26, i64 57, i64 41, i64 42, i64 66, i64 80, i64 27, i64 78, i64 88, i64 77, i64 76, i64 26, i64 42, i64 25, i64 50, i64 17, i64 9, i64 78, i64 53, i64 26, i64 26, i64 3, i64 84, i64 85, i64 27, i64 92, i64 50, i64 0, i64 71, i64 31, i64 27, i64 63, i64 88, i64 34, i64 4, i64 19, i64 14, i64 32, i64 97, i64 68, i64 75, i64 72, i64 95, i64 16, i64 64, i64 10], [100 x i64] [i64 100, i64 73, i64 88, i64 52, i64 65, i64 80, i64 21, i64 49, i64 64, i64 14, i64 6, i64 13, i64 15, i64 77, i64 10, i64 8, i64 6, i64 64, i64 42, i64 10, i64 83, i64 22, i64 8, i64 45, i64 91, i64 49, i64 84, i64 51, i64 65, i64 47, i64 27, i64 30, i64 86, i64 82, i64 82, i64 50, i64 61, i64 70, i64 65, i64 92, i64 84, i64 71, i64 71, i64 65, i64 14, i64 82, i64 73, i64 20, i64 11, i64 15, i64 97, i64 61, i64 37, i64 5, i64 72, i64 94, i64 54, i64 55, i64 10, i64 86, i64 68, i64 38, i64 15, i64 53, i64 19, i64 64, i64 70, i64 80, i64 33, i64 34, i64 37, i64 16, i64 72, i64 8, i64 82, i64 86, i64 56, i64 54, i64 5, i64 33, i64 69, i64 1, i64 94, i64 73, i64 73, i64 66, i64 66, i64 27, i64 87, i64 77, i64 79, i64 55, i64 14, i64 94, i64 74, i64 100, i64 57, i64 43, i64 45, i64 90], [100 x i64] [i64 44, i64 83, i64 73, i64 15, i64 91, i64 54, i64 0, i64 46, i64 74, i64 72, i64 79, i64 9, i64 39, i64 39, i64 82, i64 12, i64 71, i64 13, i64 5, i64 57, i64 90, i64 84, i64 11, i64 70, i64 77, i64 52, i64 69, i64 0, i64 95, i64 14, i64 56, i64 38, i64 63, i64 28, i64 19, i64 53, i64 48, i64 19, i64 65, i64 89, i64 57, i64 9, i64 98, i64 97, i64 14, i64 45, i64 8, i64 85, i64 58, i64 80, i64 42, i64 14, i64 63, i64 19, i64 50, i64 5, i64 71, i64 86, i64 72, i64 66, i64 66, i64 28, i64 70, i64 28, i64 56, i64 90, i64 81, i64 71, i64 75, i64 11, i64 59, i64 32, i64 87, i64 56, i64 28, i64 1, i64 67, i64 2, i64 86, i64 91, i64 82, i64 27, i64 71, i64 10, i64 47, i64 21, i64 82, i64 17, i64 6, i64 54, i64 49, i64 38, i64 82, i64 86, i64 66, i64 3, i64 75, i64 12, i64 74, i64 15], [100 x i64] [i64 23, i64 99, i64 47, i64 9, i64 20, i64 75, i64 10, i64 87, i64 43, i64 63, i64 44, i64 91, i64 90, i64 14, i64 0, i64 2, i64 35, i64 83, i64 87, i64 7, i64 2, i64 1, i64 45, i64 84, i64 87, i64 77, i64 53, i64 27, i64 89, i64 94, i64 43, i64 78, i64 92, i64 90, i64 88, i64 12, i64 31, i64 64, i64 65, i64 74, i64 93, i64 8, i64 65, i64 49, i64 23, i64 31, i64 51, i64 24, i64 80, i64 3, i64 99, i64 82, i64 5, i64 9, i64 31, i64 92, i64 87, i64 85, i64 19, i64 41, i64 78, i64 62, i64 19, i64 35, i64 17, i64 73, i64 13, i64 48, i64 2, i64 79, i64 89, i64 96, i64 53, i64 19, i64 44, i64 42, i64 50, i64 61, i64 67, i64 30, i64 65, i64 31, i64 78, i64 36, i64 40, i64 9, i64 94, i64 93, i64 60, i64 12, i64 34, i64 3, i64 40, i64 53, i64 38, i64 24, i64 92, i64 52, i64 72, i64 94], [100 x i64] [i64 97, i64 60, i64 89, i64 15, i64 79, i64 99, i64 58, i64 96, i64 26, i64 91, i64 92, i64 91, i64 21, i64 69, i64 93, i64 27, i64 44, i64 86, i64 20, i64 3, i64 65, i64 54, i64 6, i64 71, i64 73, i64 11, i64 95, i64 64, i64 29, i64 67, i64 23, i64 92, i64 93, i64 79, i64 6, i64 38, i64 77, i64 30, i64 33, i64 2, i64 20, i64 91, 
i64 59, i64 7, i64 59, i64 51, i64 1, i64 3, i64 3, i64 21, i64 73, i64 68, i64 41, i64 46, i64 4, i64 80, i64 57, i64 100, i64 9, i64 86, i64 32, i64 32, i64 43, i64 24, i64 10, i64 49, i64 28, i64 88, i64 80, i64 27, i64 56, i64 66, i64 17, i64 82, i64 40, i64 77, i64 32, i64 41, i64 46, i64 1, i64 28, i64 85, i64 35, i64 69, i64 30, i64 40, i64 14, i64 53, i64 39, i64 23, i64 4, i64 71, i64 55, i64 47, i64 61, i64 66, i64 97, i64 56, i64 19, i64 42], [100 x i64] [i64 83, i64 41, i64 74, i64 0, i64 22, i64 80, i64 77, i64 21, i64 20, i64 89, i64 22, i64 14, i64 73, i64 58, i64 83, i64 70, i64 98, i64 63, i64 22, i64 2, i64 86, i64 27, i64 39, i64 41, i64 40, i64 66, i64 73, i64 36, i64 21, i64 92, i64 44, i64 4, i64 32, i64 85, i64 4, i64 21, i64 64, i64 47, i64 42, i64 85, i64 1, i64 64, i64 65, i64 40, i64 88, i64 48, i64 9, i64 51, i64 77, i64 99, i64 53, i64 63, i64 92, i64 58, i64 3, i64 31, i64 24, i64 76, i64 34, i64 11, i64 33, i64 44, i64 15, i64 31, i64 28, i64 86, i64 52, i64 93, i64 99, i64 94, i64 43, i64 100, i64 24, i64 7, i64 40, i64 11, i64 21, i64 15, i64 63, i64 99, i64 13, i64 82, i64 61, i64 4, i64 40, i64 30, i64 2, i64 30, i64 72, i64 36, i64 41, i64 71, i64 80, i64 23, i64 1, i64 8, i64 8, i64 20, i64 67, i64 7]], align 16 +@qHead = dso_local local_unnamed_addr global %struct._QITEM* null, align 8 +@g_qCount = dso_local local_unnamed_addr global i64 0, align 8 +@.str = private unnamed_addr constant [5 x i8] c" %ld\00", align 1 +@ch = dso_local local_unnamed_addr global i64 0, align 8 +@rgnNodes = dso_local global [100 x %struct._NODE] zeroinitializer, align 16 +@iNode = dso_local local_unnamed_addr global i64 0, align 8 +@iDist = dso_local local_unnamed_addr global i64 0, align 8 +@iPrev = dso_local local_unnamed_addr global i64 0, align 8 +@i = dso_local local_unnamed_addr global i64 0, align 8 +@AdjMatrix = dso_local local_unnamed_addr global [100 x [100 x i64]] zeroinitializer, align 16 +@iCost = dso_local local_unnamed_addr global i64 0, align 8 +@.str.3 = private unnamed_addr constant [32 x i8] c"Shortest path is %ld in cost.\0A\00", align 1 +@.str.6 = private unnamed_addr constant [16 x i8] c"Data @ %d : %d\0A\00", align 1 +@str = private unnamed_addr constant [14 x i8] c"Out of memory\00", align 1 +@str.7 = private unnamed_addr constant [10 x i8] c"Path is: \00", align 1 +@str.8 = private unnamed_addr constant [53 x i8] c"Shortest path is 0 in cost. 
Just stay where you are.\00", align 1 + +; Function Attrs: mustprogress nofree norecurse nosync nounwind sspstrong uwtable willreturn +define dso_local i8* @s_malloc(i64 %0) local_unnamed_addr #0 { + %2 = load i8*, i8** @alloc_ptr, align 8, !tbaa !5 + %3 = getelementptr inbounds i8, i8* %2, i64 %0 + %4 = icmp ult i8* %3, getelementptr inbounds ([16384 x i8], [16384 x i8]* @alloc_pool, i64 1, i64 0) + %5 = getelementptr inbounds [16384 x i8], [16384 x i8]* @alloc_pool, i64 0, i64 %0 + %6 = select i1 %4, i8* %3, i8* %5 + %7 = select i1 %4, i8* %2, i8* getelementptr inbounds ([16384 x i8], [16384 x i8]* @alloc_pool, i64 0, i64 0) + store i8* %6, i8** @alloc_ptr, align 8, !tbaa !5 + ret i8* %7 +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone sspstrong uwtable willreturn +define dso_local void @s_free(i8* nocapture %0) local_unnamed_addr #1 { + ret void +} + +; Function Attrs: nofree nounwind sspstrong uwtable +define dso_local void @print_path(%struct._NODE* nocapture readonly %0, i64 %1) local_unnamed_addr #2 { + %3 = getelementptr inbounds %struct._NODE, %struct._NODE* %0, i64 %1, i32 1 + %4 = load i64, i64* %3, align 8, !tbaa !9 + %5 = icmp eq i64 %4, 9999 + br i1 %5, label %7, label %6 + +6: ; preds = %2 + call void @print_path(%struct._NODE* %0, i64 %4) + br label %7 + +7: ; preds = %6, %2 + %8 = call i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i64 0, i64 0), i64 %1) + ret void +} + +; Function Attrs: nofree nounwind +declare noundef i32 @printf(i8* nocapture noundef readonly, ...) local_unnamed_addr #3 + +; Function Attrs: nofree nounwind sspstrong uwtable +define dso_local void @enqueue(i64 %0, i64 %1, i64 %2) local_unnamed_addr #2 { + %4 = load i8*, i8** @alloc_ptr, align 8, !tbaa !5 + %5 = getelementptr inbounds i8, i8* %4, i64 32 + %6 = icmp ult i8* %5, getelementptr inbounds ([16384 x i8], [16384 x i8]* @alloc_pool, i64 1, i64 0) + %7 = select i1 %6, i8* %5, i8* getelementptr inbounds ([16384 x i8], [16384 x i8]* @alloc_pool, i64 0, i64 32) + %8 = select i1 %6, i8* %4, i8* getelementptr inbounds ([16384 x i8], [16384 x i8]* @alloc_pool, i64 0, i64 0) + store i8* %7, i8** @alloc_ptr, align 8, !tbaa !5 + %9 = load %struct._QITEM*, %struct._QITEM** @qHead, align 8, !tbaa !5 + %10 = icmp eq i8* %8, null + br i1 %10, label %11, label %14 + +11: ; preds = %3 + %12 = call i32 @puts(i8* nonnull dereferenceable(1) getelementptr inbounds ([14 x i8], [14 x i8]* @str, i64 0, i64 0)) + br label %13 + +13: ; preds = %13, %11 + br label %13, !llvm.loop !12 + +14: ; preds = %3 + %15 = bitcast i8* %8 to i64* + store i64 %0, i64* %15, align 8, !tbaa !14 + %16 = getelementptr inbounds i8, i8* %8, i64 8 + %17 = bitcast i8* %16 to i64* + store i64 %1, i64* %17, align 8, !tbaa !16 + %18 = getelementptr inbounds i8, i8* %8, i64 16 + %19 = bitcast i8* %18 to i64* + store i64 %2, i64* %19, align 8, !tbaa !17 + %20 = getelementptr inbounds i8, i8* %8, i64 24 + %21 = bitcast i8* %20 to %struct._QITEM** + store %struct._QITEM* null, %struct._QITEM** %21, align 8, !tbaa !18 + %22 = icmp eq %struct._QITEM* %9, null + br i1 %22, label %31, label %23 + +23: ; preds = %14, %23 + %24 = phi %struct._QITEM* [ %26, %23 ], [ %9, %14 ] + %25 = getelementptr inbounds %struct._QITEM, %struct._QITEM* %24, i64 0, i32 3 + %26 = load %struct._QITEM*, %struct._QITEM** %25, align 8, !tbaa !18 + %27 = icmp eq %struct._QITEM* %26, null + br i1 %27, label %28, label %23, !llvm.loop !19 + +28: ; preds = %23 + %29 = getelementptr inbounds 
%struct._QITEM, %struct._QITEM* %24, i64 0, i32 3 + %30 = bitcast %struct._QITEM** %29 to i8** + br label %31 + +31: ; preds = %14, %28 + %32 = phi i8** [ %30, %28 ], [ bitcast (%struct._QITEM** @qHead to i8**), %14 ] + store i8* %8, i8** %32, align 8, !tbaa !5 + %33 = load i64, i64* @g_qCount, align 8, !tbaa !21 + %34 = add nsw i64 %33, 1 + store i64 %34, i64* @g_qCount, align 8, !tbaa !21 + ret void +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind sspstrong uwtable willreturn +define dso_local void @dequeue(i64* nocapture %0, i64* nocapture %1, i64* nocapture %2) local_unnamed_addr #0 { + %4 = load %struct._QITEM*, %struct._QITEM** @qHead, align 8, !tbaa !5 + %5 = icmp eq %struct._QITEM* %4, null + br i1 %5, label %17, label %6 + +6: ; preds = %3 + %7 = getelementptr inbounds %struct._QITEM, %struct._QITEM* %4, i64 0, i32 0 + %8 = load i64, i64* %7, align 8, !tbaa !14 + store i64 %8, i64* %0, align 8, !tbaa !21 + %9 = getelementptr inbounds %struct._QITEM, %struct._QITEM* %4, i64 0, i32 1 + %10 = load i64, i64* %9, align 8, !tbaa !16 + store i64 %10, i64* %1, align 8, !tbaa !21 + %11 = getelementptr inbounds %struct._QITEM, %struct._QITEM* %4, i64 0, i32 2 + %12 = load i64, i64* %11, align 8, !tbaa !17 + store i64 %12, i64* %2, align 8, !tbaa !21 + %13 = getelementptr inbounds %struct._QITEM, %struct._QITEM* %4, i64 0, i32 3 + %14 = load %struct._QITEM*, %struct._QITEM** %13, align 8, !tbaa !18 + store %struct._QITEM* %14, %struct._QITEM** @qHead, align 8, !tbaa !5 + %15 = load i64, i64* @g_qCount, align 8, !tbaa !21 + %16 = add nsw i64 %15, -1 + store i64 %16, i64* @g_qCount, align 8, !tbaa !21 + br label %17 + +17: ; preds = %6, %3 + ret void +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind readonly sspstrong uwtable willreturn +define dso_local i32 @qcount() local_unnamed_addr #4 { + %1 = load i64, i64* @g_qCount, align 8, !tbaa !21 + %2 = trunc i64 %1 to i32 + ret i32 %2 +} + +; Function Attrs: nofree nounwind sspstrong uwtable +define dso_local void @dijkstra(i64 %0, i64 %1) local_unnamed_addr #2 { + br label %3 + +3: ; preds = %2, %3 + %4 = phi i64 [ 0, %2 ], [ %7, %3 ] + %5 = getelementptr inbounds [100 x %struct._NODE], [100 x %struct._NODE]* @rgnNodes, i64 0, i64 %4, i32 0 + store i64 9999, i64* %5, align 16, !tbaa !22 + %6 = getelementptr inbounds [100 x %struct._NODE], [100 x %struct._NODE]* @rgnNodes, i64 0, i64 %4, i32 1 + store i64 9999, i64* %6, align 8, !tbaa !9 + %7 = add nuw nsw i64 %4, 1 + %8 = icmp eq i64 %7, 100 + br i1 %8, label %9, label %3, !llvm.loop !23 + +9: ; preds = %3 + store i64 100, i64* @ch, align 8, !tbaa !21 + %10 = icmp eq i64 %0, %1 + br i1 %10, label %11, label %13 + +11: ; preds = %9 + %12 = call i32 @puts(i8* nonnull dereferenceable(1) getelementptr inbounds ([53 x i8], [53 x i8]* @str.8, i64 0, i64 0)) + br label %127 + +13: ; preds = %9 + %14 = getelementptr inbounds [100 x %struct._NODE], [100 x %struct._NODE]* @rgnNodes, i64 0, i64 %0, i32 0 + store i64 0, i64* %14, align 16, !tbaa !22 + %15 = getelementptr inbounds [100 x %struct._NODE], [100 x %struct._NODE]* @rgnNodes, i64 0, i64 %0, i32 1 + store i64 9999, i64* %15, align 8, !tbaa !9 + %16 = load i8*, i8** @alloc_ptr, align 8, !tbaa !5 + %17 = getelementptr inbounds i8, i8* %16, i64 32 + %18 = icmp ult i8* %17, getelementptr inbounds ([16384 x i8], [16384 x i8]* @alloc_pool, i64 1, i64 0) + %19 = select i1 %18, i8* %17, i8* getelementptr inbounds ([16384 x i8], [16384 x i8]* @alloc_pool, i64 0, i64 32) + %20 = select i1 %18, i8* %16, i8* 
getelementptr inbounds ([16384 x i8], [16384 x i8]* @alloc_pool, i64 0, i64 0) + store i8* %19, i8** @alloc_ptr, align 8, !tbaa !5 + %21 = load %struct._QITEM*, %struct._QITEM** @qHead, align 8, !tbaa !5 + %22 = icmp eq i8* %20, null + br i1 %22, label %23, label %26 + +23: ; preds = %13 + %24 = call i32 @puts(i8* nonnull dereferenceable(1) getelementptr inbounds ([14 x i8], [14 x i8]* @str, i64 0, i64 0)) #7 + br label %25 + +25: ; preds = %25, %23 + br label %25, !llvm.loop !12 + +26: ; preds = %13 + %27 = bitcast i8* %20 to i64* + store i64 %0, i64* %27, align 8, !tbaa !14 + %28 = getelementptr inbounds i8, i8* %20, i64 8 + %29 = bitcast i8* %28 to i64* + store i64 0, i64* %29, align 8, !tbaa !16 + %30 = getelementptr inbounds i8, i8* %20, i64 16 + %31 = bitcast i8* %30 to i64* + store i64 9999, i64* %31, align 8, !tbaa !17 + %32 = getelementptr inbounds i8, i8* %20, i64 24 + %33 = bitcast i8* %32 to %struct._QITEM** + store %struct._QITEM* null, %struct._QITEM** %33, align 8, !tbaa !18 + %34 = icmp eq %struct._QITEM* %21, null + br i1 %34, label %43, label %35 + +35: ; preds = %26, %35 + %36 = phi %struct._QITEM* [ %38, %35 ], [ %21, %26 ] + %37 = getelementptr inbounds %struct._QITEM, %struct._QITEM* %36, i64 0, i32 3 + %38 = load %struct._QITEM*, %struct._QITEM** %37, align 8, !tbaa !18 + %39 = icmp eq %struct._QITEM* %38, null + br i1 %39, label %40, label %35, !llvm.loop !19 + +40: ; preds = %35 + %41 = getelementptr inbounds %struct._QITEM, %struct._QITEM* %36, i64 0, i32 3 + %42 = bitcast %struct._QITEM** %41 to i8** + br label %43 + +43: ; preds = %26, %40 + %44 = phi i8** [ %42, %40 ], [ bitcast (%struct._QITEM** @qHead to i8**), %26 ] + store i8* %20, i8** %44, align 8, !tbaa !5 + %45 = load i64, i64* @g_qCount, align 8, !tbaa !21 + %46 = add nsw i64 %45, 1 + store i64 %46, i64* @g_qCount, align 8, !tbaa !21 + %47 = trunc i64 %46 to i32 + %48 = icmp sgt i32 %47, 0 + br i1 %48, label %53, label %121 + +49: ; preds = %117 + %50 = load i64, i64* @g_qCount, align 8, !tbaa !21 + %51 = trunc i64 %50 to i32 + %52 = icmp sgt i32 %51, 0 + br i1 %52, label %53, label %121, !llvm.loop !24 + +53: ; preds = %43, %49 + %54 = phi i64 [ %50, %49 ], [ %46, %43 ] + %55 = load %struct._QITEM*, %struct._QITEM** @qHead, align 8, !tbaa !5 + %56 = icmp eq %struct._QITEM* %55, null + br i1 %56, label %67, label %57 + +57: ; preds = %53 + %58 = getelementptr inbounds %struct._QITEM, %struct._QITEM* %55, i64 0, i32 0 + %59 = load i64, i64* %58, align 8, !tbaa !14 + store i64 %59, i64* @iNode, align 8, !tbaa !21 + %60 = getelementptr inbounds %struct._QITEM, %struct._QITEM* %55, i64 0, i32 1 + %61 = load i64, i64* %60, align 8, !tbaa !16 + store i64 %61, i64* @iDist, align 8, !tbaa !21 + %62 = getelementptr inbounds %struct._QITEM, %struct._QITEM* %55, i64 0, i32 2 + %63 = load i64, i64* %62, align 8, !tbaa !17 + store i64 %63, i64* @iPrev, align 8, !tbaa !21 + %64 = getelementptr inbounds %struct._QITEM, %struct._QITEM* %55, i64 0, i32 3 + %65 = load %struct._QITEM*, %struct._QITEM** %64, align 8, !tbaa !18 + store %struct._QITEM* %65, %struct._QITEM** @qHead, align 8, !tbaa !5 + %66 = add nsw i64 %54, -1 + store i64 %66, i64* @g_qCount, align 8, !tbaa !21 + br label %67 + +67: ; preds = %53, %57 + store i64 0, i64* @i, align 8, !tbaa !21 + br label %68 + +68: ; preds = %67, %117 + %69 = phi i64 [ 0, %67 ], [ %119, %117 ] + %70 = load i64, i64* @iNode, align 8, !tbaa !21 + %71 = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* @AdjMatrix, i64 0, i64 %70, i64 %69 + %72 = load i64, i64* 
%71, align 8, !tbaa !21 + store i64 %72, i64* @iCost, align 8, !tbaa !21 + %73 = icmp eq i64 %72, 9999 + br i1 %73, label %117, label %74 + +74: ; preds = %68 + %75 = getelementptr inbounds [100 x %struct._NODE], [100 x %struct._NODE]* @rgnNodes, i64 0, i64 %69, i32 0 + %76 = load i64, i64* %75, align 16, !tbaa !22 + %77 = icmp eq i64 %76, 9999 + br i1 %77, label %82, label %78 + +78: ; preds = %74 + %79 = load i64, i64* @iDist, align 8, !tbaa !21 + %80 = add nsw i64 %79, %72 + %81 = icmp sgt i64 %76, %80 + br i1 %81, label %82, label %117 + +82: ; preds = %78, %74 + %83 = load i64, i64* @iDist, align 8, !tbaa !21 + %84 = add nsw i64 %83, %72 + store i64 %84, i64* %75, align 16, !tbaa !22 + %85 = getelementptr inbounds [100 x %struct._NODE], [100 x %struct._NODE]* @rgnNodes, i64 0, i64 %69, i32 1 + store i64 %70, i64* %85, align 8, !tbaa !9 + %86 = load i8*, i8** @alloc_ptr, align 8, !tbaa !5 + %87 = getelementptr inbounds i8, i8* %86, i64 32 + %88 = icmp ult i8* %87, getelementptr inbounds ([16384 x i8], [16384 x i8]* @alloc_pool, i64 1, i64 0) + %89 = select i1 %88, i8* %87, i8* getelementptr inbounds ([16384 x i8], [16384 x i8]* @alloc_pool, i64 0, i64 32) + %90 = select i1 %88, i8* %86, i8* getelementptr inbounds ([16384 x i8], [16384 x i8]* @alloc_pool, i64 0, i64 0) + store i8* %89, i8** @alloc_ptr, align 8, !tbaa !5 + %91 = load %struct._QITEM*, %struct._QITEM** @qHead, align 8, !tbaa !5 + %92 = icmp eq i8* %90, null + br i1 %92, label %93, label %96 + +93: ; preds = %82 + %94 = call i32 @puts(i8* nonnull dereferenceable(1) getelementptr inbounds ([14 x i8], [14 x i8]* @str, i64 0, i64 0)) #7 + br label %95 + +95: ; preds = %95, %93 + br label %95, !llvm.loop !12 + +96: ; preds = %82 + %97 = bitcast i8* %90 to i64* + store i64 %69, i64* %97, align 8, !tbaa !14 + %98 = getelementptr inbounds i8, i8* %90, i64 8 + %99 = bitcast i8* %98 to i64* + store i64 %84, i64* %99, align 8, !tbaa !16 + %100 = getelementptr inbounds i8, i8* %90, i64 16 + %101 = bitcast i8* %100 to i64* + store i64 %70, i64* %101, align 8, !tbaa !17 + %102 = getelementptr inbounds i8, i8* %90, i64 24 + %103 = bitcast i8* %102 to %struct._QITEM** + store %struct._QITEM* null, %struct._QITEM** %103, align 8, !tbaa !18 + %104 = icmp eq %struct._QITEM* %91, null + br i1 %104, label %113, label %105 + +105: ; preds = %96, %105 + %106 = phi %struct._QITEM* [ %108, %105 ], [ %91, %96 ] + %107 = getelementptr inbounds %struct._QITEM, %struct._QITEM* %106, i64 0, i32 3 + %108 = load %struct._QITEM*, %struct._QITEM** %107, align 8, !tbaa !18 + %109 = icmp eq %struct._QITEM* %108, null + br i1 %109, label %110, label %105, !llvm.loop !19 + +110: ; preds = %105 + %111 = getelementptr inbounds %struct._QITEM, %struct._QITEM* %106, i64 0, i32 3 + %112 = bitcast %struct._QITEM** %111 to i8** + br label %113 + +113: ; preds = %96, %110 + %114 = phi i8** [ %112, %110 ], [ bitcast (%struct._QITEM** @qHead to i8**), %96 ] + store i8* %90, i8** %114, align 8, !tbaa !5 + %115 = load i64, i64* @g_qCount, align 8, !tbaa !21 + %116 = add nsw i64 %115, 1 + store i64 %116, i64* @g_qCount, align 8, !tbaa !21 + br label %117 + +117: ; preds = %68, %113, %78 + %118 = load i64, i64* @i, align 8, !tbaa !21 + %119 = add nsw i64 %118, 1 + store i64 %119, i64* @i, align 8, !tbaa !21 + %120 = icmp slt i64 %118, 99 + br i1 %120, label %68, label %49, !llvm.loop !25 + +121: ; preds = %49, %43 + %122 = getelementptr inbounds [100 x %struct._NODE], [100 x %struct._NODE]* @rgnNodes, i64 0, i64 %1, i32 0 + %123 = load i64, i64* %122, align 16, !tbaa !22 + 
%124 = call i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([32 x i8], [32 x i8]* @.str.3, i64 0, i64 0), i64 %123) + %125 = call i32 @puts(i8* nonnull dereferenceable(1) getelementptr inbounds ([10 x i8], [10 x i8]* @str.7, i64 0, i64 0)) + call void @print_path(%struct._NODE* getelementptr inbounds ([100 x %struct._NODE], [100 x %struct._NODE]* @rgnNodes, i64 0, i64 0), i64 %1) + %126 = call i32 @putchar(i32 10) + br label %127 + +127: ; preds = %121, %11 + ret void +} + +; Function Attrs: nofree nounwind sspstrong uwtable +define dso_local i32 @main(i32 %0, i8** nocapture readnone %1) local_unnamed_addr #2 { + call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(80000) bitcast ([100 x [100 x i64]]* @AdjMatrix to i8*), i8* noundef nonnull align 16 dereferenceable(80000) bitcast ([100 x [100 x i64]]* @dijkstra_input_data to i8*), i64 80000, i1 false) + br label %3 + +3: ; preds = %2, %3 + %4 = phi i64 [ %8, %3 ], [ 50, %2 ] + %5 = phi i64 [ %7, %3 ], [ 0, %2 ] + %6 = srem i64 %4, 100 + call void @dijkstra(i64 %5, i64 %6) + %7 = add nuw nsw i64 %5, 1 + %8 = add nsw i64 %6, 1 + %9 = icmp eq i64 %7, 100 + br i1 %9, label %10, label %3, !llvm.loop !26 + +10: ; preds = %3 + ret i32 0 +} + +; Function Attrs: nofree nounwind sspstrong uwtable +define dso_local void @print_dijkstra() local_unnamed_addr #2 { + br label %2 + +1: ; preds = %2 + ret void + +2: ; preds = %0, %2 + %3 = phi i64 [ 0, %0 ], [ %9, %2 ] + %4 = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* @dijkstra_input_data, i64 0, i64 %3, i64 0 + %5 = load i64, i64* %4, align 16, !tbaa !21 + %6 = trunc i64 %5 to i32 + %7 = trunc i64 %3 to i32 + %8 = call i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([16 x i8], [16 x i8]* @.str.6, i64 0, i64 0), i32 %7, i32 %6) + %9 = add nuw nsw i64 %3, 1 + %10 = icmp eq i64 %9, 100 + br i1 %10, label %1, label %2, !llvm.loop !27 +} + +; Function Attrs: nofree nounwind +declare noundef i32 @puts(i8* nocapture noundef readonly) local_unnamed_addr #5 + +; Function Attrs: nofree nounwind +declare noundef i32 @putchar(i32 noundef) local_unnamed_addr #5 + +; Function Attrs: argmemonly nofree nounwind willreturn +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg) #6 + +attributes #0 = { mustprogress nofree norecurse nosync nounwind sspstrong uwtable willreturn "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #1 = { mustprogress nofree norecurse nosync nounwind readnone sspstrong uwtable willreturn "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #2 = { nofree nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #3 = { nofree nounwind "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #4 = { mustprogress nofree norecurse nosync nounwind readonly sspstrong uwtable 
willreturn "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #5 = { nofree nounwind } +attributes #6 = { argmemonly nofree nounwind willreturn } +attributes #7 = { nounwind } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = !{!6, !6, i64 0} +!6 = !{!"any pointer", !7, i64 0} +!7 = !{!"omnipotent char", !8, i64 0} +!8 = !{!"Simple C/C++ TBAA"} +!9 = !{!10, !11, i64 8} +!10 = !{!"_NODE", !11, i64 0, !11, i64 8} +!11 = !{!"long", !7, i64 0} +!12 = distinct !{!12, !13} +!13 = !{!"llvm.loop.unroll.disable"} +!14 = !{!15, !11, i64 0} +!15 = !{!"_QITEM", !11, i64 0, !11, i64 8, !11, i64 16, !6, i64 24} +!16 = !{!15, !11, i64 8} +!17 = !{!15, !11, i64 16} +!18 = !{!15, !6, i64 24} +!19 = distinct !{!19, !20, !13} +!20 = !{!"llvm.loop.mustprogress"} +!21 = !{!11, !11, i64 0} +!22 = !{!10, !11, i64 0} +!23 = distinct !{!23, !20, !13} +!24 = distinct !{!24, !20, !13} +!25 = distinct !{!25, !20, !13} +!26 = distinct !{!26, !20, !13} +!27 = distinct !{!27, !20, !13} diff --git a/test/duff.ll b/test/duff.ll new file mode 100644 index 0000000..9d11e71 --- /dev/null +++ b/test/duff.ll @@ -0,0 +1,219 @@ +; ModuleID = 'duff.c' +source_filename = "duff.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +@source = dso_local local_unnamed_addr global [100 x i8] zeroinitializer, align 16 +@target = dso_local local_unnamed_addr global [100 x i8] zeroinitializer, align 16 + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local void @duffcopy(i8* nocapture %0, i8* nocapture readonly %1, i32 %2) local_unnamed_addr #0 { + %4 = add nsw i32 %2, 7 + %5 = sdiv i32 %4, 8 + %6 = srem i32 %2, 8 + switch i32 %6, label %65 [ + i32 0, label %7 + i32 7, label %14 + i32 6, label %21 + i32 5, label %28 + i32 4, label %35 + i32 3, label %42 + i32 2, label %49 + i32 1, label %56 + ] + +7: ; preds = %3, %56 + %8 = phi i8* [ %60, %56 ], [ %1, %3 ] + %9 = phi i8* [ %62, %56 ], [ %0, %3 ] + %10 = phi i32 [ %63, %56 ], [ %5, %3 ] + %11 = getelementptr inbounds i8, i8* %8, i64 1 + %12 = load i8, i8* %8, align 1, !tbaa !5 + %13 = getelementptr inbounds i8, i8* %9, i64 1 + store i8 %12, i8* %9, align 1, !tbaa !5 + br label %14 + +14: ; preds = %3, %7 + %15 = phi i8* [ %11, %7 ], [ %1, %3 ] + %16 = phi i8* [ %13, %7 ], [ %0, %3 ] + %17 = phi i32 [ %10, %7 ], [ %5, %3 ] + %18 = getelementptr inbounds i8, i8* %15, i64 1 + %19 = load i8, i8* %15, align 1, !tbaa !5 + %20 = getelementptr inbounds i8, i8* %16, i64 1 + store i8 %19, i8* %16, align 1, !tbaa !5 + br label %21 + +21: ; preds = %3, %14 + %22 = phi i8* [ %18, %14 ], [ %1, %3 ] + %23 = phi i8* [ %20, %14 ], [ %0, %3 ] + %24 = phi i32 [ %17, %14 ], [ %5, %3 ] + %25 = getelementptr inbounds i8, i8* %22, i64 1 + %26 = load i8, i8* %22, align 1, !tbaa !5 + %27 = getelementptr inbounds i8, i8* %23, i64 1 + store i8 %26, i8* %23, align 1, !tbaa !5 + br label %28 + +28: ; preds = %3, %21 + %29 = phi i8* [ %25, %21 ], [ %1, %3 ] + %30 = phi i8* [ %27, %21 ], [ %0, %3 ] + %31 = phi i32 [ %24, %21 ], [ %5, %3 ] + %32 = getelementptr inbounds i8, i8* %29, i64 1 + %33 = load i8, i8* %29, align 1, !tbaa !5 + %34 = 
getelementptr inbounds i8, i8* %30, i64 1 + store i8 %33, i8* %30, align 1, !tbaa !5 + br label %35 + +35: ; preds = %3, %28 + %36 = phi i8* [ %32, %28 ], [ %1, %3 ] + %37 = phi i8* [ %34, %28 ], [ %0, %3 ] + %38 = phi i32 [ %31, %28 ], [ %5, %3 ] + %39 = getelementptr inbounds i8, i8* %36, i64 1 + %40 = load i8, i8* %36, align 1, !tbaa !5 + %41 = getelementptr inbounds i8, i8* %37, i64 1 + store i8 %40, i8* %37, align 1, !tbaa !5 + br label %42 + +42: ; preds = %3, %35 + %43 = phi i8* [ %39, %35 ], [ %1, %3 ] + %44 = phi i8* [ %41, %35 ], [ %0, %3 ] + %45 = phi i32 [ %38, %35 ], [ %5, %3 ] + %46 = getelementptr inbounds i8, i8* %43, i64 1 + %47 = load i8, i8* %43, align 1, !tbaa !5 + %48 = getelementptr inbounds i8, i8* %44, i64 1 + store i8 %47, i8* %44, align 1, !tbaa !5 + br label %49 + +49: ; preds = %3, %42 + %50 = phi i8* [ %46, %42 ], [ %1, %3 ] + %51 = phi i8* [ %48, %42 ], [ %0, %3 ] + %52 = phi i32 [ %45, %42 ], [ %5, %3 ] + %53 = getelementptr inbounds i8, i8* %50, i64 1 + %54 = load i8, i8* %50, align 1, !tbaa !5 + %55 = getelementptr inbounds i8, i8* %51, i64 1 + store i8 %54, i8* %51, align 1, !tbaa !5 + br label %56 + +56: ; preds = %3, %49 + %57 = phi i8* [ %1, %3 ], [ %53, %49 ] + %58 = phi i8* [ %0, %3 ], [ %55, %49 ] + %59 = phi i32 [ %5, %3 ], [ %52, %49 ] + %60 = getelementptr inbounds i8, i8* %57, i64 1 + %61 = load i8, i8* %57, align 1, !tbaa !5 + %62 = getelementptr inbounds i8, i8* %58, i64 1 + store i8 %61, i8* %58, align 1, !tbaa !5 + %63 = add nsw i32 %59, -1 + %64 = icmp sgt i32 %59, 1 + br i1 %64, label %7, label %65, !llvm.loop !8 + +65: ; preds = %56, %3 + ret void +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable writeonly +define dso_local void @initialize(i8* nocapture %0, i32 %1) local_unnamed_addr #1 { + %3 = icmp sgt i32 %1, 0 + br i1 %3, label %4, label %14 + +4: ; preds = %2 + %5 = zext i32 %1 to i64 + br label %6 + +6: ; preds = %4, %6 + %7 = phi i64 [ 0, %4 ], [ %12, %6 ] + %8 = trunc i64 %7 to i32 + %9 = sub nsw i32 %1, %8 + %10 = trunc i32 %9 to i8 + %11 = getelementptr inbounds i8, i8* %0, i64 %7 + store i8 %10, i8* %11, align 1, !tbaa !5 + %12 = add nuw nsw i64 %7, 1 + %13 = icmp eq i64 %12, %5 + br i1 %13, label %14, label %6, !llvm.loop !11 + +14: ; preds = %6, %2 + ret void +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local void @main() local_unnamed_addr #0 { + br label %1 + +1: ; preds = %1, %0 + %2 = phi i64 [ 0, %0 ], [ %6, %1 ] + %3 = trunc i64 %2 to i8 + %4 = sub i8 100, %3 + %5 = getelementptr inbounds [100 x i8], [100 x i8]* @source, i64 0, i64 %2 + store i8 %4, i8* %5, align 1, !tbaa !5 + %6 = add nuw nsw i64 %2, 1 + %7 = icmp eq i64 %6, 100 + br i1 %7, label %8, label %1, !llvm.loop !11 + +8: ; preds = %1 + %9 = load i8, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @target, i64 0, i64 0), align 16, !tbaa !5 + store i8 %9, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @source, i64 0, i64 0), align 16, !tbaa !5 + %10 = load i8, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @target, i64 0, i64 1), align 1, !tbaa !5 + store i8 %10, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @source, i64 0, i64 1), align 1, !tbaa !5 + %11 = load i8, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @target, i64 0, i64 2), align 2, !tbaa !5 + store i8 %11, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @source, i64 0, i64 2), align 2, !tbaa !5 + br label %12 + +12: ; preds = %8, %12 + %13 = phi i32 [ 6, %8 ], [ %18, %12 ] + %14 = phi i8* [ getelementptr 
inbounds ([100 x i8], [100 x i8]* @source, i64 0, i64 0), %8 ], [ %33, %12 ] + %15 = phi i8* [ getelementptr inbounds ([100 x i8], [100 x i8]* @target, i64 0, i64 0), %8 ], [ %31, %12 ] + %16 = getelementptr inbounds i8, i8* %14, i64 3 + %17 = getelementptr inbounds i8, i8* %15, i64 3 + %18 = add nsw i32 %13, -1 + %19 = getelementptr inbounds i8, i8* %15, i64 4 + %20 = load i8, i8* %17, align 1, !tbaa !5 + %21 = getelementptr inbounds i8, i8* %14, i64 4 + store i8 %20, i8* %16, align 1, !tbaa !5 + %22 = getelementptr inbounds i8, i8* %15, i64 5 + %23 = load i8, i8* %19, align 1, !tbaa !5 + %24 = getelementptr inbounds i8, i8* %14, i64 5 + store i8 %23, i8* %21, align 1, !tbaa !5 + %25 = getelementptr inbounds i8, i8* %15, i64 6 + %26 = load i8, i8* %22, align 1, !tbaa !5 + %27 = getelementptr inbounds i8, i8* %14, i64 6 + store i8 %26, i8* %24, align 1, !tbaa !5 + %28 = getelementptr inbounds i8, i8* %15, i64 7 + %29 = load i8, i8* %25, align 1, !tbaa !5 + %30 = getelementptr inbounds i8, i8* %14, i64 7 + store i8 %29, i8* %27, align 1, !tbaa !5 + %31 = getelementptr inbounds i8, i8* %15, i64 8 + %32 = load i8, i8* %28, align 1, !tbaa !5 + %33 = getelementptr inbounds i8, i8* %14, i64 8 + store i8 %32, i8* %30, align 1, !tbaa !5 + %34 = getelementptr inbounds i8, i8* %15, i64 9 + %35 = load i8, i8* %31, align 1, !tbaa !5 + %36 = getelementptr inbounds i8, i8* %14, i64 9 + store i8 %35, i8* %33, align 1, !tbaa !5 + %37 = getelementptr inbounds i8, i8* %15, i64 10 + %38 = load i8, i8* %34, align 1, !tbaa !5 + %39 = getelementptr inbounds i8, i8* %14, i64 10 + store i8 %38, i8* %36, align 1, !tbaa !5 + %40 = load i8, i8* %37, align 1, !tbaa !5 + store i8 %40, i8* %39, align 1, !tbaa !5 + %41 = icmp ugt i32 %13, 2 + br i1 %41, label %12, label %42, !llvm.loop !8 + +42: ; preds = %12 + ret void +} + +attributes #0 = { nofree norecurse nosync nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #1 = { nofree norecurse nosync nounwind sspstrong uwtable writeonly "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = !{!6, !6, i64 0} +!6 = !{!"omnipotent char", !7, i64 0} +!7 = !{!"Simple C/C++ TBAA"} +!8 = distinct !{!8, !9, !10} +!9 = !{!"llvm.loop.mustprogress"} +!10 = !{!"llvm.loop.unroll.disable"} +!11 = distinct !{!11, !9, !10} diff --git a/test/edn.ll b/test/edn.ll new file mode 100644 index 0000000..6056579 --- /dev/null +++ b/test/edn.ll @@ -0,0 +1,789 @@ +; ModuleID = 'edn.c' +source_filename = "edn.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +@__const.main.a = private unnamed_addr constant [200 x i16] [i16 0, i16 2047, i16 3072, i16 2048, i16 512, i16 -2048, i16 -3328, i16 1024, i16 0, i16 2047, i16 3072, i16 2048, i16 512, i16 -2048, i16 -3328, i16 1024, i16 0, i16 2047, i16 3072, i16 2048, i16 512, i16 -2048, i16 -3328, i16 1024, i16 0, i16 2047, i16 3072, i16 2048, i16 512, i16 -2048, i16 -3328, i16 1024, 
i16 0, i16 2047, i16 3072, i16 2048, i16 512, i16 -2048, i16 -3328, i16 1024, i16 0, i16 2047, i16 3072, i16 2048, i16 512, i16 -2048, i16 -3328, i16 1024, i16 0, i16 2047, i16 3072, i16 2048, i16 512, i16 -2048, i16 -3328, i16 1024, i16 0, i16 2047, i16 3072, i16 2048, i16 512, i16 -2048, i16 -3328, i16 1024, i16 0, i16 2047, i16 3072, i16 2048, i16 512, i16 -2048, i16 -3328, i16 1024, i16 0, i16 2047, i16 3072, i16 2048, i16 512, i16 -2048, i16 -3328, i16 1024, i16 0, i16 2047, i16 3072, i16 2048, i16 512, i16 -2048, i16 -3328, i16 1024, i16 0, i16 2047, i16 3072, i16 2048, i16 512, i16 -2048, i16 -3328, i16 1024, i16 0, i16 2047, i16 3072, i16 2048, i16 512, i16 -2048, i16 -3328, i16 1024, i16 0, i16 2047, i16 3072, i16 2048, i16 512, i16 -2048, i16 -3328, i16 1024, i16 0, i16 2047, i16 3072, i16 2048, i16 512, i16 -2048, i16 -3328, i16 1024, i16 0, i16 2047, i16 3072, i16 2048, i16 512, i16 -2048, i16 -3328, i16 1024, i16 0, i16 2047, i16 3072, i16 2048, i16 512, i16 -2048, i16 -3328, i16 1024, i16 0, i16 2047, i16 3072, i16 2048, i16 512, i16 -2048, i16 -3328, i16 1024, i16 0, i16 2047, i16 3072, i16 2048, i16 512, i16 -2048, i16 -3328, i16 1024, i16 0, i16 2047, i16 3072, i16 2048, i16 512, i16 -2048, i16 -3328, i16 1024, i16 0, i16 2047, i16 3072, i16 2048, i16 512, i16 -2048, i16 -3328, i16 1024, i16 0, i16 2047, i16 3072, i16 2048, i16 512, i16 -2048, i16 -3328, i16 1024, i16 0, i16 2047, i16 3072, i16 2048, i16 512, i16 -2048, i16 -3328, i16 1024, i16 0, i16 2047, i16 3072, i16 2048, i16 512, i16 -2048, i16 -3328, i16 1024, i16 0, i16 2047, i16 3072, i16 2048, i16 512, i16 -2048, i16 -3328, i16 1024], align 16 +@__const.main.b = private unnamed_addr constant [200 x i16] [i16 3168, i16 3136, i16 3104, i16 3072, i16 -2560, i16 -3072, i16 -3584, i16 -4096, i16 3168, i16 3136, i16 3104, i16 3072, i16 -2560, i16 -3072, i16 -3584, i16 -4096, i16 3168, i16 3136, i16 3104, i16 3072, i16 -2560, i16 -3072, i16 -3584, i16 -4096, i16 3168, i16 3136, i16 3104, i16 3072, i16 -2560, i16 -3072, i16 -3584, i16 -4096, i16 3168, i16 3136, i16 3104, i16 3072, i16 -2560, i16 -3072, i16 -3584, i16 -4096, i16 3168, i16 3136, i16 3104, i16 3072, i16 -2560, i16 -3072, i16 -3584, i16 -4096, i16 3168, i16 3136, i16 3104, i16 3072, i16 -2560, i16 -3072, i16 -3584, i16 -4096, i16 3168, i16 3136, i16 3104, i16 3072, i16 -2560, i16 -3072, i16 -3584, i16 -4096, i16 3168, i16 3136, i16 3104, i16 3072, i16 -2560, i16 -3072, i16 -3584, i16 -4096, i16 3168, i16 3136, i16 3104, i16 3072, i16 -2560, i16 -3072, i16 -3584, i16 -4096, i16 3168, i16 3136, i16 3104, i16 3072, i16 -2560, i16 -3072, i16 -3584, i16 -4096, i16 3168, i16 3136, i16 3104, i16 3072, i16 -2560, i16 -3072, i16 -3584, i16 -4096, i16 3168, i16 3136, i16 3104, i16 3072, i16 -2560, i16 -3072, i16 -3584, i16 -4096, i16 3168, i16 3136, i16 3104, i16 3072, i16 -2560, i16 -3072, i16 -3584, i16 -4096, i16 3168, i16 3136, i16 3104, i16 3072, i16 -2560, i16 -3072, i16 -3584, i16 -4096, i16 3168, i16 3136, i16 3104, i16 3072, i16 -2560, i16 -3072, i16 -3584, i16 -4096, i16 3168, i16 3136, i16 3104, i16 3072, i16 -2560, i16 -3072, i16 -3584, i16 -4096, i16 3168, i16 3136, i16 3104, i16 3072, i16 -2560, i16 -3072, i16 -3584, i16 -4096, i16 3168, i16 3136, i16 3104, i16 3072, i16 -2560, i16 -3072, i16 -3584, i16 -4096, i16 3168, i16 3136, i16 3104, i16 3072, i16 -2560, i16 -3072, i16 -3584, i16 -4096, i16 3168, i16 3136, i16 3104, i16 3072, i16 -2560, i16 -3072, i16 -3584, i16 -4096, i16 3168, i16 3136, i16 3104, i16 3072, i16 -2560, i16 -3072, i16 -3584, i16 
-4096, i16 3168, i16 3136, i16 3104, i16 3072, i16 -2560, i16 -3072, i16 -3584, i16 -4096, i16 3168, i16 3136, i16 3104, i16 3072, i16 -2560, i16 -3072, i16 -3584, i16 -4096, i16 3168, i16 3136, i16 3104, i16 3072, i16 -2560, i16 -3072, i16 -3584, i16 -4096], align 16 + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local void @vec_mpy1(i16* nocapture %0, i16* nocapture readonly %1, i16 signext %2) local_unnamed_addr #0 { + %4 = sext i16 %2 to i32 + br label %5 + +5: ; preds = %3, %5 + %6 = phi i64 [ 0, %3 ], [ %16, %5 ] + %7 = getelementptr inbounds i16, i16* %1, i64 %6 + %8 = load i16, i16* %7, align 2, !tbaa !5 + %9 = sext i16 %8 to i32 + %10 = mul nsw i32 %9, %4 + %11 = lshr i32 %10, 15 + %12 = getelementptr inbounds i16, i16* %0, i64 %6 + %13 = load i16, i16* %12, align 2, !tbaa !5 + %14 = trunc i32 %11 to i16 + %15 = add i16 %13, %14 + store i16 %15, i16* %12, align 2, !tbaa !5 + %16 = add nuw nsw i64 %6, 1 + %17 = icmp eq i64 %16, 150 + br i1 %17, label %18, label %5, !llvm.loop !9 + +18: ; preds = %5 + ret void +} + +; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn +declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) #1 + +; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn +declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture) #1 + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local i64 @mac(i16* nocapture readonly %0, i16* nocapture readonly %1, i64 %2, i64* nocapture %3) local_unnamed_addr #0 { + %5 = load i64, i64* %3, align 8, !tbaa !12 + br label %6 + +6: ; preds = %4, %6 + %7 = phi i64 [ %5, %4 ], [ %18, %6 ] + %8 = phi i64 [ 0, %4 ], [ %22, %6 ] + %9 = phi i64 [ %2, %4 ], [ %21, %6 ] + %10 = getelementptr inbounds i16, i16* %1, i64 %8 + %11 = load i16, i16* %10, align 2, !tbaa !5 + %12 = sext i16 %11 to i32 + %13 = getelementptr inbounds i16, i16* %0, i64 %8 + %14 = load i16, i16* %13, align 2, !tbaa !5 + %15 = sext i16 %14 to i32 + %16 = mul nsw i32 %15, %12 + %17 = sext i32 %16 to i64 + %18 = add nsw i64 %7, %17 + %19 = mul nsw i32 %12, %12 + %20 = zext i32 %19 to i64 + %21 = add nsw i64 %9, %20 + %22 = add nuw nsw i64 %8, 1 + %23 = icmp eq i64 %22, 150 + br i1 %23, label %24, label %6, !llvm.loop !14 + +24: ; preds = %6 + store i64 %18, i64* %3, align 8, !tbaa !12 + ret i64 %21 +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local void @fir(i16* nocapture readonly %0, i16* nocapture readonly %1, i64* nocapture %2) local_unnamed_addr #0 { + br label %4 + +4: ; preds = %3, %20 + %5 = phi i64 [ 0, %3 ], [ %23, %20 ] + br label %6 + +6: ; preds = %4, %6 + %7 = phi i64 [ 0, %4 ], [ %17, %6 ] + %8 = phi i64 [ 0, %4 ], [ %18, %6 ] + %9 = add nuw nsw i64 %8, %5 + %10 = getelementptr inbounds i16, i16* %0, i64 %9 + %11 = load i16, i16* %10, align 2, !tbaa !5 + %12 = sext i16 %11 to i64 + %13 = getelementptr inbounds i16, i16* %1, i64 %8 + %14 = load i16, i16* %13, align 2, !tbaa !5 + %15 = sext i16 %14 to i64 + %16 = mul nsw i64 %15, %12 + %17 = add nsw i64 %16, %7 + %18 = add nuw nsw i64 %8, 1 + %19 = icmp eq i64 %18, 50 + br i1 %19, label %20, label %6, !llvm.loop !15 + +20: ; preds = %6 + %21 = ashr i64 %17, 15 + %22 = getelementptr inbounds i64, i64* %2, i64 %5 + store i64 %21, i64* %22, align 8, !tbaa !12 + %23 = add nuw nsw i64 %5, 1 + %24 = icmp eq i64 %23, 50 + br i1 %24, label %25, label %4, !llvm.loop !16 + +25: ; preds = %20 + ret void +} + +; Function Attrs: nofree norecurse nosync 
nounwind sspstrong uwtable +define dso_local void @fir_no_red_ld(i16* nocapture readonly %0, i16* nocapture readonly %1, i64* nocapture %2) local_unnamed_addr #0 { + br label %4 + +4: ; preds = %3, %44 + %5 = phi i64 [ 0, %3 ], [ %50, %44 ] + %6 = getelementptr inbounds i16, i16* %0, i64 %5 + %7 = load i16, i16* %6, align 2, !tbaa !5 + br label %8 + +8: ; preds = %4, %8 + %9 = phi i64 [ 0, %4 ], [ %42, %8 ] + %10 = phi i64 [ 0, %4 ], [ %37, %8 ] + %11 = phi i16 [ %7, %4 ], [ %30, %8 ] + %12 = phi i64 [ 0, %4 ], [ %41, %8 ] + %13 = add nuw nsw i64 %9, %5 + %14 = or i64 %13, 1 + %15 = getelementptr inbounds i16, i16* %0, i64 %14 + %16 = load i16, i16* %15, align 2, !tbaa !5 + %17 = getelementptr inbounds i16, i16* %1, i64 %9 + %18 = load i16, i16* %17, align 2, !tbaa !5 + %19 = sext i16 %11 to i32 + %20 = sext i16 %18 to i32 + %21 = mul nsw i32 %20, %19 + %22 = sext i32 %21 to i64 + %23 = add nsw i64 %10, %22 + %24 = sext i16 %16 to i32 + %25 = mul nsw i32 %20, %24 + %26 = sext i32 %25 to i64 + %27 = add nsw i64 %12, %26 + %28 = add nuw nsw i64 %13, 2 + %29 = getelementptr inbounds i16, i16* %0, i64 %28 + %30 = load i16, i16* %29, align 2, !tbaa !5 + %31 = or i64 %9, 1 + %32 = getelementptr inbounds i16, i16* %1, i64 %31 + %33 = load i16, i16* %32, align 2, !tbaa !5 + %34 = sext i16 %33 to i32 + %35 = mul nsw i32 %34, %24 + %36 = sext i32 %35 to i64 + %37 = add nsw i64 %23, %36 + %38 = sext i16 %30 to i32 + %39 = mul nsw i32 %34, %38 + %40 = sext i32 %39 to i64 + %41 = add nsw i64 %27, %40 + %42 = add nuw nsw i64 %9, 2 + %43 = icmp ult i64 %9, 30 + br i1 %43, label %8, label %44, !llvm.loop !17 + +44: ; preds = %8 + %45 = ashr i64 %37, 15 + %46 = getelementptr inbounds i64, i64* %2, i64 %5 + store i64 %45, i64* %46, align 8, !tbaa !12 + %47 = ashr i64 %41, 15 + %48 = or i64 %5, 1 + %49 = getelementptr inbounds i64, i64* %2, i64 %48 + store i64 %47, i64* %49, align 8, !tbaa !12 + %50 = add nuw nsw i64 %5, 2 + %51 = icmp ult i64 %5, 98 + br i1 %51, label %4, label %52, !llvm.loop !18 + +52: ; preds = %44 + ret void +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local i64 @latsynth(i16* nocapture %0, i16* nocapture readonly %1, i64 %2, i64 %3) local_unnamed_addr #0 { + %5 = add nsw i64 %2, -1 + %6 = getelementptr inbounds i16, i16* %0, i64 %5 + %7 = load i16, i16* %6, align 2, !tbaa !5 + %8 = sext i16 %7 to i64 + %9 = getelementptr inbounds i16, i16* %1, i64 %5 + %10 = load i16, i16* %9, align 2, !tbaa !5 + %11 = sext i16 %10 to i64 + %12 = mul nsw i64 %11, %8 + %13 = sub nsw i64 %3, %12 + %14 = icmp sgt i64 %2, 1 + br i1 %14, label %15, label %37 + +15: ; preds = %4 + %16 = add nsw i64 %2, -2 + br label %17 + +17: ; preds = %15, %17 + %18 = phi i64 [ %35, %17 ], [ %16, %15 ] + %19 = phi i64 [ %27, %17 ], [ %13, %15 ] + %20 = getelementptr inbounds i16, i16* %0, i64 %18 + %21 = load i16, i16* %20, align 2, !tbaa !5 + %22 = sext i16 %21 to i64 + %23 = getelementptr inbounds i16, i16* %1, i64 %18 + %24 = load i16, i16* %23, align 2, !tbaa !5 + %25 = sext i16 %24 to i64 + %26 = mul nsw i64 %25, %22 + %27 = sub nsw i64 %19, %26 + %28 = ashr i64 %27, 16 + %29 = mul nsw i64 %28, %25 + %30 = lshr i64 %29, 16 + %31 = trunc i64 %30 to i16 + %32 = add i16 %21, %31 + %33 = add nsw i64 %18, 1 + %34 = getelementptr inbounds i16, i16* %0, i64 %33 + store i16 %32, i16* %34, align 2, !tbaa !5 + %35 = add nsw i64 %18, -1 + %36 = icmp sgt i64 %18, 0 + br i1 %36, label %17, label %37, !llvm.loop !19 + +37: ; preds = %17, %4 + %38 = phi i64 [ %13, %4 ], [ %27, %17 ] + %39 = 
lshr i64 %38, 16 + %40 = trunc i64 %39 to i16 + store i16 %40, i16* %0, align 2, !tbaa !5 + ret i64 %38 +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local void @iir1(i16* nocapture readonly %0, i16* nocapture readonly %1, i64* nocapture %2, i64* nocapture %3) local_unnamed_addr #0 { + %5 = load i16, i16* %1, align 2, !tbaa !5 + %6 = sext i16 %5 to i64 + br label %7 + +7: ; preds = %4, %7 + %8 = phi i64 [ 0, %4 ], [ %38, %7 ] + %9 = phi i16* [ %0, %4 ], [ %36, %7 ] + %10 = phi i64 [ %6, %4 ], [ %35, %7 ] + %11 = phi i64* [ %3, %4 ], [ %37, %7 ] + %12 = getelementptr inbounds i16, i16* %9, i64 2 + %13 = load i16, i16* %12, align 2, !tbaa !5 + %14 = sext i16 %13 to i64 + %15 = load i64, i64* %11, align 8, !tbaa !12 + %16 = mul nsw i64 %15, %14 + %17 = getelementptr inbounds i16, i16* %9, i64 3 + %18 = load i16, i16* %17, align 2, !tbaa !5 + %19 = sext i16 %18 to i64 + %20 = getelementptr inbounds i64, i64* %11, i64 1 + %21 = load i64, i64* %20, align 8, !tbaa !12 + %22 = mul nsw i64 %21, %19 + %23 = add nsw i64 %22, %16 + %24 = ashr i64 %23, 15 + %25 = add nsw i64 %24, %10 + %26 = load i16, i16* %9, align 2, !tbaa !5 + %27 = sext i16 %26 to i64 + %28 = mul nsw i64 %15, %27 + %29 = getelementptr inbounds i16, i16* %9, i64 1 + %30 = load i16, i16* %29, align 2, !tbaa !5 + %31 = sext i16 %30 to i64 + %32 = mul nsw i64 %21, %31 + %33 = add nsw i64 %32, %28 + %34 = ashr i64 %33, 15 + %35 = add nsw i64 %34, %25 + store i64 %15, i64* %20, align 8, !tbaa !12 + store i64 %25, i64* %11, align 8, !tbaa !12 + %36 = getelementptr inbounds i16, i16* %9, i64 4 + %37 = getelementptr inbounds i64, i64* %11, i64 2 + %38 = add nuw nsw i64 %8, 1 + %39 = icmp eq i64 %38, 50 + br i1 %39, label %40, label %7, !llvm.loop !20 + +40: ; preds = %7 + store i64 %35, i64* %2, align 8, !tbaa !12 + ret void +} + +; Function Attrs: nofree norecurse nosync nounwind readnone sspstrong uwtable +define dso_local i64 @codebook(i64 %0, i64 %1, i64 %2, i64 %3, i64 returned %4, i16* nocapture readnone %5, i16 signext %6, i16 signext %7) local_unnamed_addr #2 { + ret i64 %4 +} + +; Function Attrs: nofree nosync nounwind sspstrong uwtable +define dso_local void @jpegdct(i16* nocapture %0, i16* nocapture readonly %1) local_unnamed_addr #3 { + %3 = alloca [12 x i64], align 16 + %4 = bitcast [12 x i64]* %3 to i8* + call void @llvm.lifetime.start.p0i8(i64 96, i8* nonnull %4) #5 + %5 = getelementptr inbounds [12 x i64], [12 x i64]* %3, i64 0, i64 0 + %6 = getelementptr inbounds [12 x i64], [12 x i64]* %3, i64 0, i64 3 + %7 = getelementptr inbounds [12 x i64], [12 x i64]* %3, i64 0, i64 8 + %8 = getelementptr inbounds [12 x i64], [12 x i64]* %3, i64 0, i64 9 + %9 = getelementptr inbounds [12 x i64], [12 x i64]* %3, i64 0, i64 1 + %10 = getelementptr inbounds [12 x i64], [12 x i64]* %3, i64 0, i64 2 + %11 = getelementptr inbounds [12 x i64], [12 x i64]* %3, i64 0, i64 10 + %12 = getelementptr inbounds [12 x i64], [12 x i64]* %3, i64 0, i64 11 + %13 = getelementptr inbounds i16, i16* %1, i64 10 + %14 = getelementptr inbounds i16, i16* %1, i64 9 + %15 = getelementptr inbounds i16, i16* %1, i64 11 + %16 = getelementptr inbounds [12 x i64], [12 x i64]* %3, i64 0, i64 4 + %17 = getelementptr inbounds [12 x i64], [12 x i64]* %3, i64 0, i64 7 + %18 = getelementptr inbounds i16, i16* %1, i64 2 + %19 = getelementptr inbounds [12 x i64], [12 x i64]* %3, i64 0, i64 5 + %20 = getelementptr inbounds [12 x i64], [12 x i64]* %3, i64 0, i64 6 + %21 = getelementptr inbounds i16, i16* %1, i64 8 + %22 = getelementptr 
inbounds i16, i16* %1, i64 1 + %23 = getelementptr inbounds i16, i16* %1, i64 3 + %24 = getelementptr inbounds i16, i16* %1, i64 4 + %25 = getelementptr inbounds i16, i16* %1, i64 6 + %26 = getelementptr inbounds i16, i16* %1, i64 5 + %27 = getelementptr inbounds i16, i16* %1, i64 7 + br label %28 + +28: ; preds = %2, %204 + %29 = phi i64 [ 13, %2 ], [ %207, %204 ] + %30 = phi i64 [ 0, %2 ], [ %206, %204 ] + %31 = phi i64 [ 8, %2 ], [ %208, %204 ] + %32 = phi i32 [ 1, %2 ], [ %210, %204 ] + %33 = phi i16* [ %0, %2 ], [ %209, %204 ] + %34 = shl nuw nsw i32 %32, 2 + %35 = zext i32 %34 to i64 + %36 = shl nuw nsw i32 %32, 1 + %37 = zext i32 %36 to i64 + %38 = mul nuw nsw i32 %32, 6 + %39 = zext i32 %38 to i64 + %40 = mul nuw nsw i32 %32, 7 + %41 = zext i32 %40 to i64 + %42 = mul nuw nsw i32 %32, 5 + %43 = zext i32 %42 to i64 + %44 = mul nuw nsw i32 %32, 3 + %45 = zext i32 %44 to i64 + %46 = zext i32 %32 to i64 + %47 = zext i32 %32 to i64 + %48 = zext i32 %32 to i64 + %49 = trunc i64 %29 to i32 + %50 = trunc i64 %29 to i32 + %51 = trunc i64 %29 to i32 + %52 = trunc i64 %29 to i32 + br label %53 + +53: ; preds = %28, %73 + %54 = phi i16 [ 0, %28 ], [ %201, %73 ] + %55 = phi i16* [ %33, %28 ], [ %202, %73 ] + br label %56 + +56: ; preds = %53, %56 + %57 = phi i64 [ 0, %53 ], [ %71, %56 ] + %58 = mul nuw nsw i64 %57, %47 + %59 = getelementptr inbounds i16, i16* %55, i64 %58 + %60 = load i16, i16* %59, align 2, !tbaa !5 + %61 = sext i16 %60 to i64 + %62 = sub nuw nsw i64 7, %57 + %63 = mul nsw i64 %62, %48 + %64 = getelementptr inbounds i16, i16* %55, i64 %63 + %65 = load i16, i16* %64, align 2, !tbaa !5 + %66 = sext i16 %65 to i64 + %67 = add nsw i64 %66, %61 + %68 = getelementptr inbounds [12 x i64], [12 x i64]* %3, i64 0, i64 %57 + store i64 %67, i64* %68, align 8, !tbaa !12 + %69 = sub nsw i64 %61, %66 + %70 = getelementptr inbounds [12 x i64], [12 x i64]* %3, i64 0, i64 %62 + store i64 %69, i64* %70, align 8, !tbaa !12 + %71 = add nuw nsw i64 %57, 1 + %72 = icmp eq i64 %71, 4 + br i1 %72, label %73, label %56, !llvm.loop !21 + +73: ; preds = %56 + %74 = load i64, i64* %5, align 16, !tbaa !12 + %75 = load i64, i64* %6, align 8, !tbaa !12 + %76 = add nsw i64 %75, %74 + store i64 %76, i64* %7, align 16, !tbaa !12 + %77 = sub nsw i64 %74, %75 + store i64 %77, i64* %8, align 8, !tbaa !12 + %78 = load i64, i64* %9, align 8, !tbaa !12 + %79 = load i64, i64* %10, align 16, !tbaa !12 + %80 = add nsw i64 %79, %78 + store i64 %80, i64* %11, align 16, !tbaa !12 + %81 = sub nsw i64 %78, %79 + store i64 %81, i64* %12, align 8, !tbaa !12 + %82 = add nsw i64 %80, %76 + %83 = ashr i64 %82, %30 + %84 = trunc i64 %83 to i16 + store i16 %84, i16* %55, align 2, !tbaa !5 + %85 = sub nsw i64 %76, %80 + %86 = ashr i64 %85, %30 + %87 = trunc i64 %86 to i16 + %88 = getelementptr inbounds i16, i16* %55, i64 %35 + store i16 %87, i16* %88, align 2, !tbaa !5 + %89 = add nsw i64 %81, %77 + %90 = trunc i64 %89 to i32 + %91 = shl i32 %90, 16 + %92 = ashr exact i32 %91, 16 + %93 = load i16, i16* %13, align 2, !tbaa !5 + %94 = sext i16 %93 to i32 + %95 = mul nsw i32 %92, %94 + %96 = sext i32 %95 to i64 + store i64 %96, i64* %7, align 16, !tbaa !12 + %97 = load i16, i16* %14, align 2, !tbaa !5 + %98 = sext i16 %97 to i64 + %99 = mul nsw i64 %77, %98 + %100 = ashr i64 %99, %29 + %101 = add i64 %100, %96 + %102 = trunc i64 %101 to i16 + %103 = getelementptr inbounds i16, i16* %55, i64 %37 + store i16 %102, i16* %103, align 2, !tbaa !5 + %104 = load i16, i16* %15, align 2, !tbaa !5 + %105 = sext i16 %104 to i64 + %106 = mul nsw i64 
%81, %105 + %107 = ashr i64 %106, %29 + %108 = add i64 %107, %96 + %109 = trunc i64 %108 to i16 + %110 = getelementptr inbounds i16, i16* %55, i64 %39 + store i16 %109, i16* %110, align 2, !tbaa !5 + %111 = load i64, i64* %16, align 16, !tbaa !12 + %112 = load i64, i64* %17, align 8, !tbaa !12 + %113 = add nsw i64 %112, %111 + %114 = trunc i64 %113 to i32 + %115 = shl i32 %114, 16 + %116 = ashr exact i32 %115, 16 + %117 = load i16, i16* %18, align 2, !tbaa !5 + %118 = sext i16 %117 to i32 + %119 = mul nsw i32 %116, %118 + %120 = sext i32 %119 to i64 + store i64 %120, i64* %5, align 16, !tbaa !12 + %121 = load i64, i64* %19, align 8, !tbaa !12 + %122 = load i64, i64* %20, align 16, !tbaa !12 + %123 = add nsw i64 %122, %121 + %124 = trunc i64 %123 to i32 + %125 = shl i32 %124, 16 + %126 = ashr exact i32 %125, 16 + %127 = load i16, i16* %1, align 2, !tbaa !5 + %128 = sext i16 %127 to i32 + %129 = mul nsw i32 %126, %128 + %130 = sext i32 %129 to i64 + store i64 %130, i64* %9, align 8, !tbaa !12 + %131 = add nsw i64 %122, %111 + store i64 %131, i64* %10, align 16, !tbaa !12 + %132 = add nsw i64 %121, %112 + store i64 %132, i64* %6, align 8, !tbaa !12 + %133 = add nsw i64 %131, %132 + %134 = trunc i64 %133 to i32 + %135 = shl i32 %134, 16 + %136 = ashr exact i32 %135, 16 + %137 = load i16, i16* %21, align 2, !tbaa !5 + %138 = sext i16 %137 to i32 + %139 = mul nsw i32 %136, %138 + %140 = sext i32 %139 to i64 + store i64 %140, i64* %7, align 16, !tbaa !12 + %141 = trunc i64 %131 to i32 + %142 = shl i32 %141, 16 + %143 = ashr exact i32 %142, 16 + %144 = load i16, i16* %22, align 2, !tbaa !5 + %145 = sext i16 %144 to i32 + %146 = mul nsw i32 %143, %145 + %147 = sext i32 %146 to i64 + %148 = add nsw i64 %147, %140 + store i64 %148, i64* %10, align 16, !tbaa !12 + %149 = trunc i64 %132 to i32 + %150 = shl i32 %149, 16 + %151 = ashr exact i32 %150, 16 + %152 = load i16, i16* %23, align 2, !tbaa !5 + %153 = sext i16 %152 to i32 + %154 = mul nsw i32 %151, %153 + %155 = sext i32 %154 to i64 + %156 = add nsw i64 %155, %140 + store i64 %156, i64* %6, align 8, !tbaa !12 + %157 = load i16, i16* %24, align 2, !tbaa !5 + %158 = zext i16 %157 to i64 + %159 = mul i64 %111, %158 + %160 = add nsw i64 %148, %120 + %161 = add i64 %160, %159 + %162 = trunc i64 %161 to i32 + %163 = shl i32 %162, 16 + %164 = ashr exact i32 %163, 16 + %165 = ashr i32 %164, %49 + %166 = trunc i32 %165 to i16 + %167 = getelementptr inbounds i16, i16* %55, i64 %41 + store i16 %166, i16* %167, align 2, !tbaa !5 + %168 = load i16, i16* %25, align 2, !tbaa !5 + %169 = zext i16 %168 to i64 + %170 = mul i64 %121, %169 + %171 = add nsw i64 %156, %130 + %172 = add i64 %171, %170 + %173 = trunc i64 %172 to i32 + %174 = shl i32 %173, 16 + %175 = ashr exact i32 %174, 16 + %176 = ashr i32 %175, %50 + %177 = trunc i32 %176 to i16 + %178 = getelementptr inbounds i16, i16* %55, i64 %43 + store i16 %177, i16* %178, align 2, !tbaa !5 + %179 = load i16, i16* %26, align 2, !tbaa !5 + %180 = zext i16 %179 to i64 + %181 = mul i64 %122, %180 + %182 = add nsw i64 %148, %130 + %183 = add i64 %182, %181 + %184 = trunc i64 %183 to i32 + %185 = shl i32 %184, 16 + %186 = ashr exact i32 %185, 16 + %187 = ashr i32 %186, %51 + %188 = trunc i32 %187 to i16 + %189 = getelementptr inbounds i16, i16* %55, i64 %45 + store i16 %188, i16* %189, align 2, !tbaa !5 + %190 = load i16, i16* %27, align 2, !tbaa !5 + %191 = zext i16 %190 to i64 + %192 = mul i64 %112, %191 + %193 = add nsw i64 %156, %120 + %194 = add i64 %193, %192 + %195 = trunc i64 %194 to i32 + %196 = shl i32 %195, 
16 + %197 = ashr exact i32 %196, 16 + %198 = ashr i32 %197, %52 + %199 = trunc i32 %198 to i16 + %200 = getelementptr inbounds i16, i16* %55, i64 %46 + store i16 %199, i16* %200, align 2, !tbaa !5 + %201 = add nuw nsw i16 %54, 1 + %202 = getelementptr inbounds i16, i16* %55, i64 %31 + %203 = icmp eq i16 %201, 8 + br i1 %203, label %204, label %53, !llvm.loop !22 + +204: ; preds = %73 + %205 = add nuw nsw i32 %32, 7 + %206 = add nuw nsw i64 %30, 3 + %207 = add nuw nsw i64 %29, 3 + %208 = add nsw i64 %31, -7 + %209 = getelementptr inbounds i16, i16* %202, i64 -64 + %210 = and i32 %205, 65535 + %211 = icmp ult i32 %210, 9 + br i1 %211, label %28, label %212, !llvm.loop !23 + +212: ; preds = %204 + call void @llvm.lifetime.end.p0i8(i64 96, i8* nonnull %4) #5 + ret void +} + +; Function Attrs: nofree nosync nounwind sspstrong uwtable +define dso_local i32 @main() local_unnamed_addr #3 { + %1 = alloca [200 x i16], align 16 + %2 = alloca [200 x i16], align 16 + %3 = alloca [200 x i64], align 16 + %4 = bitcast [200 x i16]* %1 to i8* + call void @llvm.lifetime.start.p0i8(i64 400, i8* nonnull %4) #5 + call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(400) %4, i8* noundef nonnull align 16 dereferenceable(400) bitcast ([200 x i16]* @__const.main.a to i8*), i64 400, i1 false) + %5 = bitcast [200 x i16]* %2 to i8* + call void @llvm.lifetime.start.p0i8(i64 400, i8* nonnull %5) #5 + call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(400) %5, i8* noundef nonnull align 16 dereferenceable(400) bitcast ([200 x i16]* @__const.main.b to i8*), i64 400, i1 false) + %6 = bitcast [200 x i64]* %3 to i8* + call void @llvm.lifetime.start.p0i8(i64 1600, i8* nonnull %6) #5 + %7 = getelementptr inbounds [200 x i16], [200 x i16]* %1, i64 0, i64 0 + br label %8 + +8: ; preds = %8, %0 + %9 = phi i64 [ 0, %0 ], [ %19, %8 ] + %10 = getelementptr inbounds [200 x i16], [200 x i16]* %2, i64 0, i64 %9 + %11 = load i16, i16* %10, align 2, !tbaa !5 + %12 = sext i16 %11 to i32 + %13 = mul nsw i32 %12, 3 + %14 = lshr i32 %13, 15 + %15 = getelementptr inbounds [200 x i16], [200 x i16]* %1, i64 0, i64 %9 + %16 = load i16, i16* %15, align 2, !tbaa !5 + %17 = trunc i32 %14 to i16 + %18 = add i16 %16, %17 + store i16 %18, i16* %15, align 2, !tbaa !5 + %19 = add nuw nsw i64 %9, 1 + %20 = icmp eq i64 %19, 150 + br i1 %20, label %21, label %8, !llvm.loop !9 + +21: ; preds = %8 + %22 = getelementptr inbounds [200 x i64], [200 x i64]* %3, i64 0, i64 0 + %23 = load i64, i64* %22, align 16, !tbaa !12 + br label %24 + +24: ; preds = %24, %21 + %25 = phi i64 [ %23, %21 ], [ %34, %24 ] + %26 = phi i64 [ 0, %21 ], [ %35, %24 ] + %27 = getelementptr inbounds [200 x i16], [200 x i16]* %2, i64 0, i64 %26 + %28 = load i16, i16* %27, align 2, !tbaa !5 + %29 = sext i16 %28 to i64 + %30 = getelementptr inbounds [200 x i16], [200 x i16]* %1, i64 0, i64 %26 + %31 = load i16, i16* %30, align 2, !tbaa !5 + %32 = sext i16 %31 to i64 + %33 = mul nsw i64 %32, %29 + %34 = add nsw i64 %33, %25 + %35 = add nuw nsw i64 %26, 1 + %36 = icmp eq i64 %35, 150 + br i1 %36, label %37, label %24, !llvm.loop !14 + +37: ; preds = %24 + store i64 %34, i64* %22, align 16, !tbaa !12 + br label %38 + +38: ; preds = %54, %37 + %39 = phi i64 [ 0, %37 ], [ %57, %54 ] + br label %40 + +40: ; preds = %40, %38 + %41 = phi i64 [ 0, %38 ], [ %51, %40 ] + %42 = phi i64 [ 0, %38 ], [ %52, %40 ] + %43 = add nuw nsw i64 %42, %39 + %44 = getelementptr inbounds [200 x i16], [200 x i16]* %1, i64 0, i64 %43 + %45 = load i16, i16* %44, 
align 2, !tbaa !5 + %46 = sext i16 %45 to i64 + %47 = getelementptr inbounds [200 x i16], [200 x i16]* %2, i64 0, i64 %42 + %48 = load i16, i16* %47, align 2, !tbaa !5 + %49 = sext i16 %48 to i64 + %50 = mul nsw i64 %49, %46 + %51 = add nsw i64 %50, %41 + %52 = add nuw nsw i64 %42, 1 + %53 = icmp eq i64 %52, 50 + br i1 %53, label %54, label %40, !llvm.loop !15 + +54: ; preds = %40 + %55 = ashr i64 %51, 15 + %56 = getelementptr inbounds [200 x i64], [200 x i64]* %3, i64 0, i64 %39 + store i64 %55, i64* %56, align 8, !tbaa !12 + %57 = add nuw nsw i64 %39, 1 + %58 = icmp eq i64 %57, 50 + br i1 %58, label %59, label %38, !llvm.loop !16 + +59: ; preds = %54, %99 + %60 = phi i64 [ %105, %99 ], [ 0, %54 ] + %61 = getelementptr inbounds [200 x i16], [200 x i16]* %1, i64 0, i64 %60 + %62 = load i16, i16* %61, align 4, !tbaa !5 + br label %63 + +63: ; preds = %63, %59 + %64 = phi i64 [ 0, %59 ], [ %97, %63 ] + %65 = phi i64 [ 0, %59 ], [ %92, %63 ] + %66 = phi i16 [ %62, %59 ], [ %85, %63 ] + %67 = phi i64 [ 0, %59 ], [ %96, %63 ] + %68 = add nuw nsw i64 %64, %60 + %69 = or i64 %68, 1 + %70 = getelementptr inbounds [200 x i16], [200 x i16]* %1, i64 0, i64 %69 + %71 = load i16, i16* %70, align 2, !tbaa !5 + %72 = getelementptr inbounds [200 x i16], [200 x i16]* %2, i64 0, i64 %64 + %73 = load i16, i16* %72, align 4, !tbaa !5 + %74 = sext i16 %66 to i32 + %75 = sext i16 %73 to i32 + %76 = mul nsw i32 %75, %74 + %77 = sext i32 %76 to i64 + %78 = add nsw i64 %65, %77 + %79 = sext i16 %71 to i32 + %80 = mul nsw i32 %75, %79 + %81 = sext i32 %80 to i64 + %82 = add nsw i64 %67, %81 + %83 = add nuw nsw i64 %68, 2 + %84 = getelementptr inbounds [200 x i16], [200 x i16]* %1, i64 0, i64 %83 + %85 = load i16, i16* %84, align 4, !tbaa !5 + %86 = or i64 %64, 1 + %87 = getelementptr inbounds [200 x i16], [200 x i16]* %2, i64 0, i64 %86 + %88 = load i16, i16* %87, align 2, !tbaa !5 + %89 = sext i16 %88 to i32 + %90 = mul nsw i32 %89, %79 + %91 = sext i32 %90 to i64 + %92 = add nsw i64 %78, %91 + %93 = sext i16 %85 to i32 + %94 = mul nsw i32 %89, %93 + %95 = sext i32 %94 to i64 + %96 = add nsw i64 %82, %95 + %97 = add nuw nsw i64 %64, 2 + %98 = icmp ult i64 %64, 30 + br i1 %98, label %63, label %99, !llvm.loop !17 + +99: ; preds = %63 + %100 = ashr i64 %92, 15 + %101 = getelementptr inbounds [200 x i64], [200 x i64]* %3, i64 0, i64 %60 + store i64 %100, i64* %101, align 16, !tbaa !12 + %102 = ashr i64 %96, 15 + %103 = or i64 %60, 1 + %104 = getelementptr inbounds [200 x i64], [200 x i64]* %3, i64 0, i64 %103 + store i64 %102, i64* %104, align 8, !tbaa !12 + %105 = add nuw nsw i64 %60, 2 + %106 = icmp ult i64 %60, 98 + br i1 %106, label %59, label %107, !llvm.loop !18 + +107: ; preds = %99 + %108 = getelementptr inbounds [200 x i16], [200 x i16]* %1, i64 0, i64 99 + %109 = load i16, i16* %108, align 2, !tbaa !5 + %110 = sext i16 %109 to i64 + %111 = getelementptr inbounds [200 x i16], [200 x i16]* %2, i64 0, i64 99 + %112 = load i16, i16* %111, align 2, !tbaa !5 + %113 = sext i16 %112 to i64 + %114 = mul nsw i64 %113, %110 + %115 = sub nsw i64 43690, %114 + br label %116 + +116: ; preds = %116, %107 + %117 = phi i64 [ %134, %116 ], [ 98, %107 ] + %118 = phi i64 [ %126, %116 ], [ %115, %107 ] + %119 = getelementptr inbounds [200 x i16], [200 x i16]* %1, i64 0, i64 %117 + %120 = load i16, i16* %119, align 2, !tbaa !5 + %121 = sext i16 %120 to i64 + %122 = getelementptr inbounds [200 x i16], [200 x i16]* %2, i64 0, i64 %117 + %123 = load i16, i16* %122, align 2, !tbaa !5 + %124 = sext i16 %123 to i64 + %125 = mul nsw 
i64 %124, %121 + %126 = sub nsw i64 %118, %125 + %127 = ashr i64 %126, 16 + %128 = mul nsw i64 %127, %124 + %129 = lshr i64 %128, 16 + %130 = trunc i64 %129 to i16 + %131 = add i16 %120, %130 + %132 = add nuw nsw i64 %117, 1 + %133 = getelementptr inbounds [200 x i16], [200 x i16]* %1, i64 0, i64 %132 + store i16 %131, i16* %133, align 2, !tbaa !5 + %134 = add nsw i64 %117, -1 + %135 = icmp eq i64 %117, 0 + br i1 %135, label %136, label %116, !llvm.loop !19 + +136: ; preds = %116 + %137 = getelementptr inbounds [200 x i16], [200 x i16]* %2, i64 0, i64 0 + %138 = lshr i64 %126, 16 + %139 = trunc i64 %138 to i16 + store i16 %139, i16* %7, align 16, !tbaa !5 + %140 = load i16, i16* %137, align 16, !tbaa !5 + %141 = sext i16 %140 to i64 + br label %142 + +142: ; preds = %142, %136 + %143 = phi i64 [ 0, %136 ], [ %173, %142 ] + %144 = phi i16* [ %7, %136 ], [ %171, %142 ] + %145 = phi i64 [ %141, %136 ], [ %170, %142 ] + %146 = phi i64* [ %22, %136 ], [ %172, %142 ] + %147 = getelementptr inbounds i16, i16* %144, i64 2 + %148 = load i16, i16* %147, align 2, !tbaa !5 + %149 = sext i16 %148 to i64 + %150 = load i64, i64* %146, align 8, !tbaa !12 + %151 = mul nsw i64 %150, %149 + %152 = getelementptr inbounds i16, i16* %144, i64 3 + %153 = load i16, i16* %152, align 2, !tbaa !5 + %154 = sext i16 %153 to i64 + %155 = getelementptr inbounds i64, i64* %146, i64 1 + %156 = load i64, i64* %155, align 8, !tbaa !12 + %157 = mul nsw i64 %156, %154 + %158 = add nsw i64 %157, %151 + %159 = ashr i64 %158, 15 + %160 = add nsw i64 %159, %145 + %161 = load i16, i16* %144, align 2, !tbaa !5 + %162 = sext i16 %161 to i64 + %163 = mul nsw i64 %150, %162 + %164 = getelementptr inbounds i16, i16* %144, i64 1 + %165 = load i16, i16* %164, align 2, !tbaa !5 + %166 = sext i16 %165 to i64 + %167 = mul nsw i64 %156, %166 + %168 = add nsw i64 %167, %163 + %169 = ashr i64 %168, 15 + %170 = add nsw i64 %169, %160 + store i64 %150, i64* %155, align 8, !tbaa !12 + store i64 %160, i64* %146, align 8, !tbaa !12 + %171 = getelementptr inbounds i16, i16* %144, i64 4 + %172 = getelementptr inbounds i64, i64* %146, i64 2 + %173 = add nuw nsw i64 %143, 1 + %174 = icmp eq i64 %173, 50 + br i1 %174, label %175, label %142, !llvm.loop !20 + +175: ; preds = %142 + %176 = getelementptr inbounds [200 x i64], [200 x i64]* %3, i64 0, i64 100 + store i64 %170, i64* %176, align 16, !tbaa !12 + call void @jpegdct(i16* nonnull %7, i16* nonnull %137) + call void @llvm.lifetime.end.p0i8(i64 1600, i8* nonnull %6) #5 + call void @llvm.lifetime.end.p0i8(i64 400, i8* nonnull %5) #5 + call void @llvm.lifetime.end.p0i8(i64 400, i8* nonnull %4) #5 + ret i32 0 +} + +; Function Attrs: argmemonly mustprogress nofree nounwind willreturn +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg) #4 + +attributes #0 = { nofree norecurse nosync nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #1 = { argmemonly mustprogress nofree nosync nounwind willreturn } +attributes #2 = { nofree norecurse nosync nounwind readnone sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #3 = { nofree nosync nounwind sspstrong uwtable 
"frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #4 = { argmemonly mustprogress nofree nounwind willreturn } +attributes #5 = { nounwind } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = !{!6, !6, i64 0} +!6 = !{!"short", !7, i64 0} +!7 = !{!"omnipotent char", !8, i64 0} +!8 = !{!"Simple C/C++ TBAA"} +!9 = distinct !{!9, !10, !11} +!10 = !{!"llvm.loop.mustprogress"} +!11 = !{!"llvm.loop.unroll.disable"} +!12 = !{!13, !13, i64 0} +!13 = !{!"long", !7, i64 0} +!14 = distinct !{!14, !10, !11} +!15 = distinct !{!15, !10, !11} +!16 = distinct !{!16, !10, !11} +!17 = distinct !{!17, !10, !11} +!18 = distinct !{!18, !10, !11} +!19 = distinct !{!19, !10, !11} +!20 = distinct !{!20, !10, !11} +!21 = distinct !{!21, !10, !11} +!22 = distinct !{!22, !10, !11} +!23 = distinct !{!23, !10, !11} diff --git a/test/expint.ll b/test/expint.ll new file mode 100644 index 0000000..e20a034 --- /dev/null +++ b/test/expint.ll @@ -0,0 +1,140 @@ +; ModuleID = 'expint.c' +source_filename = "expint.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +; Function Attrs: nofree norecurse nosync nounwind readnone sspstrong uwtable +define dso_local void @main() local_unnamed_addr #0 { + ret void +} + +; Function Attrs: nofree norecurse nosync nounwind readnone sspstrong uwtable +define dso_local i64 @expint(i32 %0, i64 %1) local_unnamed_addr #0 { + %3 = add nsw i32 %0, -1 + %4 = icmp sgt i64 %1, 1 + br i1 %4, label %5, label %36 + +5: ; preds = %2 + %6 = sext i32 %0 to i64 + %7 = add nsw i64 %6, %1 + br label %12 + +8: ; preds = %12 + %9 = add nuw nsw i64 %13, 1 + %10 = add nuw nsw i32 %18, 1 + %11 = icmp eq i64 %9, 101 + br i1 %11, label %82, label %12, !llvm.loop !5 + +12: ; preds = %5, %8 + %13 = phi i64 [ 1, %5 ], [ %9, %8 ] + %14 = phi i64 [ 30000000, %5 ], [ %31, %8 ] + %15 = phi i64 [ 30000000, %5 ], [ %27, %8 ] + %16 = phi i64 [ 2000000, %5 ], [ %29, %8 ] + %17 = phi i64 [ %7, %5 ], [ %24, %8 ] + %18 = phi i32 [ 1, %5 ], [ %10, %8 ] + %19 = add nsw i32 %18, %3 + %20 = trunc i64 %13 to i32 + %21 = mul i32 %19, %20 + %22 = sub i32 0, %21 + %23 = sext i32 %22 to i64 + %24 = add nsw i64 %17, 2 + %25 = mul nsw i64 %15, %23 + %26 = add nsw i64 %25, %24 + %27 = mul nsw i64 %26, 10 + %28 = sdiv i64 %23, %16 + %29 = add nsw i64 %28, %24 + %30 = mul nsw i64 %29, %27 + %31 = mul nsw i64 %30, %14 + %32 = icmp slt i64 %30, 10000 + br i1 %32, label %33, label %8 + +33: ; preds = %12 + %34 = mul i64 %31, %1 + %35 = sub i64 0, %34 + br label %82 + +36: ; preds = %2 + %37 = icmp eq i32 %3, 0 + %38 = select i1 %37, i64 1000, i64 2 + %39 = icmp sgt i32 %0, 1 + %40 = add i64 %1, 8 + %41 = mul i64 %40, %1 + %42 = sub nsw i64 4, %1 + %43 = shl i64 %41, %42 + %44 = sext i32 %3 to i64 + %45 = add nsw i64 %44, 2 + %46 = add i32 %0, -2 + %47 = zext i32 %46 to i64 + %48 = mul i64 %45, %47 + %49 = add i64 %48, %44 + %50 = zext i32 %46 to i64 + %51 = add i32 %0, -3 + %52 = zext i32 %51 to i64 + %53 = mul nuw i64 %50, %52 + %54 = lshr i64 %53, 1 + %55 = add i64 %49, %54 + %56 = add i64 %55, 256 + %57 = zext i32 %3 to i64 + %58 = select i1 %39, i64 %56, i64 255 + br label %59 + +59: ; preds = 
%36, %77 + %60 = phi i64 [ 1, %36 ], [ %80, %77 ] + %61 = phi i64 [ %38, %36 ], [ %79, %77 ] + %62 = phi i64 [ 1, %36 ], [ %65, %77 ] + %63 = phi i64 [ -1, %36 ], [ %66, %77 ] + %64 = sdiv i64 %1, %60 + %65 = mul i64 %64, %63 + %66 = mul i64 %64, %62 + %67 = icmp eq i64 %60, %57 + br i1 %67, label %68, label %71 + +68: ; preds = %59 + %69 = mul nsw i64 %65, %43 + %70 = add nsw i64 %58, %69 + br label %77 + +71: ; preds = %59 + %72 = trunc i64 %60 to i32 + %73 = sub nsw i32 %72, %3 + %74 = sext i32 %73 to i64 + %75 = sdiv i64 %65, %74 + %76 = sub nsw i64 0, %75 + br label %77 + +77: ; preds = %68, %71 + %78 = phi i64 [ %76, %71 ], [ %70, %68 ] + %79 = add nsw i64 %78, %61 + %80 = add nuw nsw i64 %60, 1 + %81 = icmp eq i64 %80, 101 + br i1 %81, label %82, label %59, !llvm.loop !8 + +82: ; preds = %77, %8, %33 + %83 = phi i64 [ %35, %33 ], [ undef, %8 ], [ %79, %77 ] + ret i64 %83 +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone sspstrong uwtable willreturn +define dso_local i64 @foo(i64 %0) local_unnamed_addr #1 { + %2 = add i64 %0, 8 + %3 = mul i64 %2, %0 + %4 = sub nsw i64 4, %0 + %5 = shl i64 %3, %4 + ret i64 %5 +} + +attributes #0 = { nofree norecurse nosync nounwind readnone sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #1 = { mustprogress nofree norecurse nosync nounwind readnone sspstrong uwtable willreturn "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = distinct !{!5, !6, !7} +!6 = !{!"llvm.loop.mustprogress"} +!7 = !{!"llvm.loop.unroll.disable"} +!8 = distinct !{!8, !6, !7} diff --git a/test/fdct.ll b/test/fdct.ll new file mode 100644 index 0000000..e37adfc --- /dev/null +++ b/test/fdct.ll @@ -0,0 +1,257 @@ +; ModuleID = 'fdct.c' +source_filename = "fdct.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +@block = dso_local global [64 x i16] [i16 99, i16 104, i16 109, i16 113, i16 115, i16 115, i16 55, i16 55, i16 104, i16 111, i16 113, i16 116, i16 119, i16 56, i16 56, i16 56, i16 110, i16 115, i16 120, i16 119, i16 118, i16 56, i16 56, i16 56, i16 119, i16 121, i16 122, i16 120, i16 120, i16 59, i16 59, i16 59, i16 119, i16 120, i16 121, i16 122, i16 122, i16 55, i16 55, i16 55, i16 121, i16 121, i16 121, i16 121, i16 60, i16 57, i16 57, i16 57, i16 122, i16 122, i16 61, i16 63, i16 62, i16 57, i16 57, i16 57, i16 62, i16 62, i16 61, i16 61, i16 63, i16 58, i16 58, i16 58], align 16 +@out = dso_local local_unnamed_addr global i32 0, align 4 + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local void @fdct(i16* nocapture %0, i32 %1) local_unnamed_addr #0 { + %3 = sext i32 %1 to i64 + br label %18 + +4: ; preds = %18 + %5 = mul nsw i32 %1, 7 + %6 = sext i32 %5 to i64 + %7 = sext i32 %1 to i64 + %8 = mul nsw i32 %1, 6 + %9 = sext i32 %8 to i64 + %10 = shl nsw i32 %1, 1 + %11 = sext i32 %10 to i64 + %12 = mul nsw i32 %1, 5 + %13 = sext i32 %12 to 
i64 + %14 = mul nsw i32 %1, 3 + %15 = sext i32 %14 to i64 + %16 = shl nsw i32 %1, 2 + %17 = sext i32 %16 to i64 + br label %107 + +18: ; preds = %2, %18 + %19 = phi i16* [ %0, %2 ], [ %104, %18 ] + %20 = phi i32 [ 0, %2 ], [ %105, %18 ] + %21 = load i16, i16* %19, align 2, !tbaa !5 + %22 = sext i16 %21 to i32 + %23 = getelementptr inbounds i16, i16* %19, i64 7 + %24 = load i16, i16* %23, align 2, !tbaa !5 + %25 = sext i16 %24 to i32 + %26 = add nsw i32 %25, %22 + %27 = sub nsw i32 %22, %25 + %28 = getelementptr inbounds i16, i16* %19, i64 1 + %29 = load i16, i16* %28, align 2, !tbaa !5 + %30 = sext i16 %29 to i32 + %31 = getelementptr inbounds i16, i16* %19, i64 6 + %32 = load i16, i16* %31, align 2, !tbaa !5 + %33 = sext i16 %32 to i32 + %34 = add nsw i32 %33, %30 + %35 = sub nsw i32 %30, %33 + %36 = getelementptr inbounds i16, i16* %19, i64 2 + %37 = load i16, i16* %36, align 2, !tbaa !5 + %38 = sext i16 %37 to i32 + %39 = getelementptr inbounds i16, i16* %19, i64 5 + %40 = load i16, i16* %39, align 2, !tbaa !5 + %41 = sext i16 %40 to i32 + %42 = add nsw i32 %41, %38 + %43 = sub nsw i32 %38, %41 + %44 = getelementptr inbounds i16, i16* %19, i64 3 + %45 = load i16, i16* %44, align 2, !tbaa !5 + %46 = sext i16 %45 to i32 + %47 = getelementptr inbounds i16, i16* %19, i64 4 + %48 = load i16, i16* %47, align 2, !tbaa !5 + %49 = sext i16 %48 to i32 + %50 = add nsw i32 %49, %46 + %51 = sub nsw i32 %46, %49 + %52 = add nsw i32 %50, %26 + %53 = sub nsw i32 %26, %50 + %54 = add nsw i32 %42, %34 + %55 = sub nsw i32 %34, %42 + %56 = add nsw i32 %52, %54 + %57 = trunc i32 %56 to i16 + %58 = shl i16 %57, 2 + store i16 %58, i16* %19, align 2, !tbaa !5 + %59 = sub nsw i32 %52, %54 + %60 = trunc i32 %59 to i16 + %61 = shl i16 %60, 2 + store i16 %61, i16* %47, align 2, !tbaa !5 + %62 = add nsw i32 %53, %55 + %63 = mul nsw i32 %62, 4433 + %64 = mul nsw i32 %53, 6270 + %65 = add nsw i32 %63, %64 + %66 = lshr i32 %65, 11 + %67 = trunc i32 %66 to i16 + store i16 %67, i16* %36, align 2, !tbaa !5 + %68 = mul nsw i32 %55, -15137 + %69 = add nsw i32 %63, %68 + %70 = lshr i32 %69, 11 + %71 = trunc i32 %70 to i16 + store i16 %71, i16* %31, align 2, !tbaa !5 + %72 = add nsw i32 %51, %27 + %73 = add nsw i32 %43, %35 + %74 = add nsw i32 %51, %35 + %75 = add nsw i32 %43, %27 + %76 = add nsw i32 %74, %75 + %77 = mul nsw i32 %76, 9633 + %78 = mul nsw i32 %51, 2446 + %79 = mul nsw i32 %43, 16819 + %80 = mul nsw i32 %35, 25172 + %81 = mul nsw i32 %27, 12299 + %82 = mul nsw i32 %72, -7373 + %83 = mul nsw i32 %73, -20995 + %84 = mul nsw i32 %74, -16069 + %85 = mul nsw i32 %75, -3196 + %86 = add nsw i32 %77, %84 + %87 = add nsw i32 %77, %85 + %88 = add nsw i32 %82, %78 + %89 = add nsw i32 %88, %86 + %90 = lshr i32 %89, 11 + %91 = trunc i32 %90 to i16 + store i16 %91, i16* %23, align 2, !tbaa !5 + %92 = add nsw i32 %83, %79 + %93 = add nsw i32 %92, %87 + %94 = lshr i32 %93, 11 + %95 = trunc i32 %94 to i16 + store i16 %95, i16* %39, align 2, !tbaa !5 + %96 = add nsw i32 %83, %80 + %97 = add nsw i32 %96, %86 + %98 = lshr i32 %97, 11 + %99 = trunc i32 %98 to i16 + store i16 %99, i16* %44, align 2, !tbaa !5 + %100 = add nsw i32 %82, %81 + %101 = add nsw i32 %100, %87 + %102 = lshr i32 %101, 11 + %103 = trunc i32 %102 to i16 + store i16 %103, i16* %28, align 2, !tbaa !5 + %104 = getelementptr inbounds i16, i16* %19, i64 %3 + %105 = add nuw nsw i32 %20, 1 + %106 = icmp eq i32 %105, 8 + br i1 %106, label %4, label %18, !llvm.loop !9 + +107: ; preds = %4, %107 + %108 = phi i16* [ %0, %4 ], [ %193, %107 ] + %109 = phi i32 [ 0, %4 ], [ 
%194, %107 ] + %110 = load i16, i16* %108, align 2, !tbaa !5 + %111 = sext i16 %110 to i32 + %112 = getelementptr inbounds i16, i16* %108, i64 %6 + %113 = load i16, i16* %112, align 2, !tbaa !5 + %114 = sext i16 %113 to i32 + %115 = add nsw i32 %114, %111 + %116 = sub nsw i32 %111, %114 + %117 = getelementptr inbounds i16, i16* %108, i64 %7 + %118 = load i16, i16* %117, align 2, !tbaa !5 + %119 = sext i16 %118 to i32 + %120 = getelementptr inbounds i16, i16* %108, i64 %9 + %121 = load i16, i16* %120, align 2, !tbaa !5 + %122 = sext i16 %121 to i32 + %123 = add nsw i32 %122, %119 + %124 = sub nsw i32 %119, %122 + %125 = getelementptr inbounds i16, i16* %108, i64 %11 + %126 = load i16, i16* %125, align 2, !tbaa !5 + %127 = sext i16 %126 to i32 + %128 = getelementptr inbounds i16, i16* %108, i64 %13 + %129 = load i16, i16* %128, align 2, !tbaa !5 + %130 = sext i16 %129 to i32 + %131 = add nsw i32 %130, %127 + %132 = sub nsw i32 %127, %130 + %133 = getelementptr inbounds i16, i16* %108, i64 %15 + %134 = load i16, i16* %133, align 2, !tbaa !5 + %135 = sext i16 %134 to i32 + %136 = getelementptr inbounds i16, i16* %108, i64 %17 + %137 = load i16, i16* %136, align 2, !tbaa !5 + %138 = sext i16 %137 to i32 + %139 = add nsw i32 %138, %135 + %140 = sub nsw i32 %135, %138 + %141 = add nsw i32 %139, %115 + %142 = sub nsw i32 %115, %139 + %143 = add nsw i32 %131, %123 + %144 = sub nsw i32 %123, %131 + %145 = add nsw i32 %141, %143 + %146 = ashr i32 %145, 5 + %147 = trunc i32 %146 to i16 + store i16 %147, i16* %108, align 2, !tbaa !5 + %148 = sub nsw i32 %141, %143 + %149 = ashr i32 %148, 5 + %150 = trunc i32 %149 to i16 + store i16 %150, i16* %136, align 2, !tbaa !5 + %151 = add nsw i32 %142, %144 + %152 = mul nsw i32 %151, 4433 + %153 = mul nsw i32 %142, 6270 + %154 = add nsw i32 %152, %153 + %155 = ashr i32 %154, 18 + %156 = trunc i32 %155 to i16 + store i16 %156, i16* %125, align 2, !tbaa !5 + %157 = mul nsw i32 %144, -15137 + %158 = add nsw i32 %152, %157 + %159 = ashr i32 %158, 18 + %160 = trunc i32 %159 to i16 + store i16 %160, i16* %120, align 2, !tbaa !5 + %161 = add nsw i32 %140, %116 + %162 = add nsw i32 %132, %124 + %163 = add nsw i32 %140, %124 + %164 = add nsw i32 %132, %116 + %165 = add nsw i32 %163, %164 + %166 = mul nsw i32 %165, 9633 + %167 = mul nsw i32 %140, 2446 + %168 = mul nsw i32 %132, 16819 + %169 = mul nsw i32 %124, 25172 + %170 = mul nsw i32 %116, 12299 + %171 = mul nsw i32 %161, -7373 + %172 = mul nsw i32 %162, -20995 + %173 = mul nsw i32 %163, -16069 + %174 = mul nsw i32 %164, -3196 + %175 = add nsw i32 %166, %173 + %176 = add nsw i32 %166, %174 + %177 = add nsw i32 %171, %167 + %178 = add nsw i32 %177, %175 + %179 = ashr i32 %178, 18 + %180 = trunc i32 %179 to i16 + store i16 %180, i16* %112, align 2, !tbaa !5 + %181 = add nsw i32 %172, %168 + %182 = add nsw i32 %181, %176 + %183 = ashr i32 %182, 18 + %184 = trunc i32 %183 to i16 + store i16 %184, i16* %128, align 2, !tbaa !5 + %185 = add nsw i32 %172, %169 + %186 = add nsw i32 %185, %175 + %187 = ashr i32 %186, 18 + %188 = trunc i32 %187 to i16 + store i16 %188, i16* %133, align 2, !tbaa !5 + %189 = add nsw i32 %171, %170 + %190 = add nsw i32 %189, %176 + %191 = ashr i32 %190, 18 + %192 = trunc i32 %191 to i16 + store i16 %192, i16* %117, align 2, !tbaa !5 + %193 = getelementptr inbounds i16, i16* %108, i64 1 + %194 = add nuw nsw i32 %109, 1 + %195 = icmp eq i32 %194, 8 + br i1 %195, label %196, label %107, !llvm.loop !12 + +196: ; preds = %107 + ret void +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong 
uwtable +define dso_local i32 @main() local_unnamed_addr #0 { + call void @fdct(i16* getelementptr inbounds ([64 x i16], [64 x i16]* @block, i64 0, i64 0), i32 8) + %1 = load i16, i16* getelementptr inbounds ([64 x i16], [64 x i16]* @block, i64 0, i64 0), align 16, !tbaa !5 + %2 = sext i16 %1 to i32 + ret i32 %2 +} + +attributes #0 = { nofree norecurse nosync nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = !{!6, !6, i64 0} +!6 = !{!"short", !7, i64 0} +!7 = !{!"omnipotent char", !8, i64 0} +!8 = !{!"Simple C/C++ TBAA"} +!9 = distinct !{!9, !10, !11} +!10 = !{!"llvm.loop.mustprogress"} +!11 = !{!"llvm.loop.unroll.disable"} +!12 = distinct !{!12, !10, !11} diff --git a/test/fft1.ll b/test/fft1.ll new file mode 100644 index 0000000..162d642 --- /dev/null +++ b/test/fft1.ll @@ -0,0 +1,383 @@ +; ModuleID = 'fft1.c' +source_filename = "fft1.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +@ai = dso_local local_unnamed_addr global [8 x double] zeroinitializer, align 16 +@ar = dso_local local_unnamed_addr global [8 x double] zeroinitializer, align 16 + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local void @main() local_unnamed_addr #0 { + br label %1 + +1: ; preds = %0, %47 + %2 = phi i64 [ 0, %0 ], [ %50, %47 ] + %3 = trunc i64 %2 to i32 + %4 = sitofp i32 %3 to double + %5 = fmul double %4, 6.283180e+00 + %6 = fmul double %5, 1.250000e-01 + %7 = fsub double 0x3FF921F9F01B866E, %6 + %8 = fcmp ogt double %7, 6.283180e+00 + br i1 %8, label %12, label %9 + +9: ; preds = %12, %1 + %10 = phi double [ %7, %1 ], [ %14, %12 ] + %11 = fcmp olt double %10, -6.283180e+00 + br i1 %11, label %16, label %20 + +12: ; preds = %1, %12 + %13 = phi double [ %14, %12 ], [ %7, %1 ] + %14 = fadd double %13, -6.283180e+00 + %15 = fcmp ogt double %14, 6.283180e+00 + br i1 %15, label %12, label %9, !llvm.loop !5 + +16: ; preds = %9, %16 + %17 = phi double [ %18, %16 ], [ %10, %9 ] + %18 = fadd double %17, 6.283180e+00 + %19 = fcmp olt double %18, -6.283180e+00 + br i1 %19, label %16, label %20, !llvm.loop !8 + +20: ; preds = %16, %9 + %21 = phi double [ %10, %9 ], [ %18, %16 ] + %22 = fneg double %21 + %23 = fmul double %21, %22 + %24 = fmul double %21, %23 + %25 = fdiv double %24, 6.000000e+00 + %26 = fadd double %21, %25 + %27 = fcmp ult double %25, 0.000000e+00 + %28 = fneg double %25 + %29 = select i1 %27, double %28, double %25 + %30 = fcmp ult double %29, 1.000000e-05 + br i1 %30, label %47, label %31 + +31: ; preds = %20, %31 + %32 = phi i32 [ %42, %31 ], [ 2, %20 ] + %33 = phi double [ %40, %31 ], [ %25, %20 ] + %34 = phi double [ %41, %31 ], [ %26, %20 ] + %35 = fmul double %23, %33 + %36 = sitofp i32 %32 to double + %37 = fmul double %36, 2.000000e+00 + %38 = fadd double %37, 1.000000e+00 + %39 = fmul double %37, %38 + %40 = fdiv double %35, %39 + %41 = fadd double %34, %40 + %42 = add nuw nsw i32 %32, 1 + %43 = fcmp ult double %40, 0.000000e+00 + %44 = fneg double %40 + %45 = select i1 %43, double %44, double %40 + %46 = fcmp ult double %45, 1.000000e-05 + 
br i1 %46, label %47, label %31, !llvm.loop !9 + +47: ; preds = %31, %20 + %48 = phi double [ %26, %20 ], [ %41, %31 ] + %49 = getelementptr inbounds [8 x double], [8 x double]* @ar, i64 0, i64 %2 + store double %48, double* %49, align 8, !tbaa !10 + %50 = add nuw nsw i64 %2, 1 + %51 = icmp eq i64 %50, 8 + br i1 %51, label %52, label %1, !llvm.loop !14 + +52: ; preds = %47 + %53 = call i32 @fft1(i32 8, i32 0) + %54 = call i32 @fft1(i32 8, i32 1) + ret void +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local i32 @fft1(i32 %0, i32 %1) local_unnamed_addr #0 { + %3 = icmp slt i32 %0, 2 + br i1 %3, label %197, label %4 + +4: ; preds = %2 + %5 = sitofp i32 %0 to double + %6 = add nsw i32 %0, -64 + %7 = sitofp i32 %6 to double + %8 = icmp slt i32 %0, 64 + %9 = fneg double %7 + %10 = select i1 %8, double %9, double %7 + %11 = fcmp ogt double %10, 0x3EB0C6F7A0B5ED8D + br i1 %11, label %197, label %12 + +12: ; preds = %4 + %13 = icmp eq i32 %1, 1 + %14 = sext i32 %0 to i64 + br label %19 + +15: ; preds = %150 + %16 = icmp sgt i32 %0, 1 + br i1 %16, label %17, label %181 + +17: ; preds = %15 + %18 = zext i32 %0 to i64 + br label %153 + +19: ; preds = %12, %150 + %20 = phi i32 [ 0, %12 ], [ %151, %150 ] + %21 = phi i32 [ %0, %12 ], [ %22, %150 ] + %22 = sdiv i32 %21, 2 + %23 = sitofp i32 %22 to double + %24 = fdiv double 3.141590e+00, %23 + %25 = icmp sgt i32 %21, %0 + %26 = icmp sgt i32 %21, 1 + br i1 %26, label %27, label %150 + +27: ; preds = %19 + %28 = sext i32 %21 to i64 + br label %29 + +29: ; preds = %27, %147 + %30 = phi i32 [ %148, %147 ], [ 0, %27 ] + %31 = sitofp i32 %30 to double + %32 = fmul double %24, %31 + %33 = fsub double 0x3FF921F9F01B866E, %32 + %34 = fcmp ogt double %33, 6.283180e+00 + br i1 %34, label %38, label %35 + +35: ; preds = %38, %29 + %36 = phi double [ %33, %29 ], [ %40, %38 ] + %37 = fcmp olt double %36, -6.283180e+00 + br i1 %37, label %42, label %46 + +38: ; preds = %29, %38 + %39 = phi double [ %40, %38 ], [ %33, %29 ] + %40 = fadd double %39, -6.283180e+00 + %41 = fcmp ogt double %40, 6.283180e+00 + br i1 %41, label %38, label %35, !llvm.loop !5 + +42: ; preds = %35, %42 + %43 = phi double [ %44, %42 ], [ %36, %35 ] + %44 = fadd double %43, 6.283180e+00 + %45 = fcmp olt double %44, -6.283180e+00 + br i1 %45, label %42, label %46, !llvm.loop !8 + +46: ; preds = %42, %35 + %47 = phi double [ %36, %35 ], [ %44, %42 ] + %48 = fneg double %47 + %49 = fmul double %47, %48 + %50 = fmul double %47, %49 + %51 = fdiv double %50, 6.000000e+00 + %52 = fadd double %47, %51 + %53 = fcmp ult double %51, 0.000000e+00 + %54 = fneg double %51 + %55 = select i1 %53, double %54, double %51 + %56 = fcmp ult double %55, 1.000000e-05 + br i1 %56, label %73, label %57 + +57: ; preds = %46, %57 + %58 = phi i32 [ %68, %57 ], [ 2, %46 ] + %59 = phi double [ %66, %57 ], [ %51, %46 ] + %60 = phi double [ %67, %57 ], [ %52, %46 ] + %61 = fmul double %49, %59 + %62 = sitofp i32 %58 to double + %63 = fmul double %62, 2.000000e+00 + %64 = fadd double %63, 1.000000e+00 + %65 = fmul double %63, %64 + %66 = fdiv double %61, %65 + %67 = fadd double %60, %66 + %68 = add nuw nsw i32 %58, 1 + %69 = fcmp ult double %66, 0.000000e+00 + %70 = fneg double %66 + %71 = select i1 %69, double %70, double %66 + %72 = fcmp ult double %71, 1.000000e-05 + br i1 %72, label %73, label %57, !llvm.loop !9 + +73: ; preds = %57, %46 + %74 = phi double [ %52, %46 ], [ %67, %57 ] + %75 = fcmp ogt double %32, 6.283180e+00 + br i1 %75, label %79, label %76 + +76: ; preds = %79, %73 + 
%77 = phi double [ %32, %73 ], [ %81, %79 ] + %78 = fcmp olt double %77, -6.283180e+00 + br i1 %78, label %83, label %87 + +79: ; preds = %73, %79 + %80 = phi double [ %81, %79 ], [ %32, %73 ] + %81 = fadd double %80, -6.283180e+00 + %82 = fcmp ogt double %81, 6.283180e+00 + br i1 %82, label %79, label %76, !llvm.loop !5 + +83: ; preds = %76, %83 + %84 = phi double [ %85, %83 ], [ %77, %76 ] + %85 = fadd double %84, 6.283180e+00 + %86 = fcmp olt double %85, -6.283180e+00 + br i1 %86, label %83, label %87, !llvm.loop !8 + +87: ; preds = %83, %76 + %88 = phi double [ %77, %76 ], [ %85, %83 ] + %89 = fneg double %88 + %90 = fmul double %88, %89 + %91 = fmul double %88, %90 + %92 = fdiv double %91, 6.000000e+00 + %93 = fadd double %88, %92 + %94 = fcmp ult double %92, 0.000000e+00 + %95 = fneg double %92 + %96 = select i1 %94, double %95, double %92 + %97 = fcmp ult double %96, 1.000000e-05 + br i1 %97, label %114, label %98 + +98: ; preds = %87, %98 + %99 = phi i32 [ %109, %98 ], [ 2, %87 ] + %100 = phi double [ %107, %98 ], [ %92, %87 ] + %101 = phi double [ %108, %98 ], [ %93, %87 ] + %102 = fmul double %90, %100 + %103 = sitofp i32 %99 to double + %104 = fmul double %103, 2.000000e+00 + %105 = fadd double %104, 1.000000e+00 + %106 = fmul double %104, %105 + %107 = fdiv double %102, %106 + %108 = fadd double %101, %107 + %109 = add nuw nsw i32 %99, 1 + %110 = fcmp ult double %107, 0.000000e+00 + %111 = fneg double %107 + %112 = select i1 %110, double %111, double %107 + %113 = fcmp ult double %112, 1.000000e-05 + br i1 %113, label %114, label %98, !llvm.loop !9 + +114: ; preds = %98, %87 + %115 = phi double [ %93, %87 ], [ %108, %98 ] + %116 = fneg double %115 + %117 = select i1 %13, double %115, double %116 + %118 = sub nsw i32 %30, %21 + br i1 %25, label %147, label %119 + +119: ; preds = %114, %119 + %120 = phi i64 [ %144, %119 ], [ %28, %114 ] + %121 = phi i32 [ %145, %119 ], [ %21, %114 ] + %122 = add nsw i32 %121, %118 + %123 = add nsw i32 %122, %22 + %124 = sext i32 %122 to i64 + %125 = getelementptr inbounds [8 x double], [8 x double]* @ar, i64 0, i64 %124 + %126 = load double, double* %125, align 8, !tbaa !10 + %127 = sext i32 %123 to i64 + %128 = getelementptr inbounds [8 x double], [8 x double]* @ar, i64 0, i64 %127 + %129 = load double, double* %128, align 8, !tbaa !10 + %130 = getelementptr inbounds [8 x double], [8 x double]* @ai, i64 0, i64 %124 + %131 = load double, double* %130, align 8, !tbaa !10 + %132 = getelementptr inbounds [8 x double], [8 x double]* @ai, i64 0, i64 %127 + %133 = load double, double* %132, align 8, !tbaa !10 + %134 = fsub double %126, %129 + %135 = fsub double %131, %133 + %136 = fadd double %126, %129 + store double %136, double* %125, align 8, !tbaa !10 + %137 = fadd double %131, %133 + store double %137, double* %130, align 8, !tbaa !10 + %138 = fmul double %74, %134 + %139 = fmul double %117, %135 + %140 = fsub double %138, %139 + store double %140, double* %128, align 8, !tbaa !10 + %141 = fmul double %74, %135 + %142 = fmul double %117, %134 + %143 = fadd double %142, %141 + store double %143, double* %132, align 8, !tbaa !10 + %144 = add i64 %120, %28 + %145 = add nsw i32 %121, %21 + %146 = icmp sgt i64 %144, %14 + br i1 %146, label %147, label %119, !llvm.loop !15 + +147: ; preds = %119, %114 + %148 = add nuw nsw i32 %30, 1 + %149 = icmp eq i32 %148, %22 + br i1 %149, label %150, label %29, !llvm.loop !16 + +150: ; preds = %147, %19 + %151 = add nuw nsw i32 %20, 1 + %152 = icmp eq i32 %151, 6 + br i1 %152, label %15, label %19, !llvm.loop !17 
+ +153: ; preds = %17, %177 + %154 = phi i64 [ 1, %17 ], [ %179, %177 ] + %155 = phi i32 [ 1, %17 ], [ %178, %177 ] + %156 = sext i32 %155 to i64 + %157 = icmp slt i64 %154, %156 + br i1 %157, label %158, label %170 + +158: ; preds = %153 + %159 = add nsw i32 %155, -1 + %160 = sext i32 %159 to i64 + %161 = getelementptr inbounds [8 x double], [8 x double]* @ar, i64 0, i64 %160 + %162 = load double, double* %161, align 8, !tbaa !10 + %163 = getelementptr inbounds [8 x double], [8 x double]* @ai, i64 0, i64 %160 + %164 = load double, double* %163, align 8, !tbaa !10 + %165 = add nsw i64 %154, -1 + %166 = getelementptr inbounds [8 x double], [8 x double]* @ar, i64 0, i64 %165 + %167 = load double, double* %166, align 8, !tbaa !10 + store double %167, double* %161, align 8, !tbaa !10 + %168 = getelementptr inbounds [8 x double], [8 x double]* @ai, i64 0, i64 %165 + %169 = load double, double* %168, align 8, !tbaa !10 + store double %169, double* %163, align 8, !tbaa !10 + store double %162, double* %166, align 8, !tbaa !10 + store double %164, double* %168, align 8, !tbaa !10 + br label %170 + +170: ; preds = %158, %153 + br label %171 + +171: ; preds = %170, %171 + %172 = phi i32 [ %174, %171 ], [ %0, %170 ] + %173 = phi i32 [ %176, %171 ], [ %155, %170 ] + %174 = sdiv i32 %172, 2 + %175 = icmp sgt i32 %173, %174 + %176 = sub nsw i32 %173, %174 + br i1 %175, label %171, label %177, !llvm.loop !18 + +177: ; preds = %171 + %178 = add nsw i32 %173, %174 + %179 = add nuw nsw i64 %154, 1 + %180 = icmp eq i64 %179, %18 + br i1 %180, label %181, label %153, !llvm.loop !19 + +181: ; preds = %177, %15 + %182 = icmp ne i32 %1, 0 + %183 = icmp sgt i32 %0, 0 + %184 = select i1 %182, i1 %183, i1 false + br i1 %184, label %185, label %197 + +185: ; preds = %181 + %186 = zext i32 %0 to i64 + br label %187 + +187: ; preds = %185, %187 + %188 = phi i64 [ 0, %185 ], [ %195, %187 ] + %189 = getelementptr inbounds [8 x double], [8 x double]* @ar, i64 0, i64 %188 + %190 = load double, double* %189, align 8, !tbaa !10 + %191 = fdiv double %190, %5 + store double %191, double* %189, align 8, !tbaa !10 + %192 = getelementptr inbounds [8 x double], [8 x double]* @ai, i64 0, i64 %188 + %193 = load double, double* %192, align 8, !tbaa !10 + %194 = fdiv double %193, %5 + store double %194, double* %192, align 8, !tbaa !10 + %195 = add nuw nsw i64 %188, 1 + %196 = icmp eq i64 %195, %186 + br i1 %196, label %197, label %187, !llvm.loop !20 + +197: ; preds = %187, %181, %4, %2 + %198 = phi i32 [ 999, %2 ], [ 1, %4 ], [ 0, %181 ], [ 0, %187 ] + ret i32 %198 +} + +attributes #0 = { nofree norecurse nosync nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = distinct !{!5, !6, !7} +!6 = !{!"llvm.loop.mustprogress"} +!7 = !{!"llvm.loop.unroll.disable"} +!8 = distinct !{!8, !6, !7} +!9 = distinct !{!9, !6, !7} +!10 = !{!11, !11, i64 0} +!11 = !{!"double", !12, i64 0} +!12 = !{!"omnipotent char", !13, i64 0} +!13 = !{!"Simple C/C++ TBAA"} +!14 = distinct !{!14, !6, !7} +!15 = distinct !{!15, !6, !7} +!16 = distinct !{!16, !6, !7} +!17 = distinct !{!17, !6, !7} +!18 = distinct !{!18, !6, !7} +!19 = distinct 
!{!19, !6, !7} +!20 = distinct !{!20, !6, !7} diff --git a/test/fibcall.ll b/test/fibcall.ll new file mode 100644 index 0000000..d653253 --- /dev/null +++ b/test/fibcall.ll @@ -0,0 +1,52 @@ +; ModuleID = 'fibcall.c' +source_filename = "fibcall.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +; Function Attrs: nofree norecurse nosync nounwind readnone sspstrong uwtable +define dso_local i32 @fib(i32 %0) local_unnamed_addr #0 { + %2 = icmp sgt i32 %0, 1 + br i1 %2, label %3, label %14 + +3: ; preds = %1 + %4 = add i32 %0, -2 + %5 = call i32 @llvm.umin.i32(i32 %4, i32 28) + %6 = add nuw nsw i32 %5, 2 + br label %7 + +7: ; preds = %3, %7 + %8 = phi i32 [ %12, %7 ], [ 2, %3 ] + %9 = phi i32 [ %11, %7 ], [ 1, %3 ] + %10 = phi i32 [ %9, %7 ], [ 0, %3 ] + %11 = add nsw i32 %9, %10 + %12 = add nuw nsw i32 %8, 1 + %13 = icmp eq i32 %8, %6 + br i1 %13, label %14, label %7, !llvm.loop !5 + +14: ; preds = %7, %1 + %15 = phi i32 [ 1, %1 ], [ %11, %7 ] + ret i32 %15 +} + +; Function Attrs: nofree norecurse nosync nounwind readnone sspstrong uwtable +define dso_local i32 @main() local_unnamed_addr #0 { + ret i32 30 +} + +; Function Attrs: nofree nosync nounwind readnone speculatable willreturn +declare i32 @llvm.umin.i32(i32, i32) #1 + +attributes #0 = { nofree norecurse nosync nounwind readnone sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #1 = { nofree nosync nounwind readnone speculatable willreturn } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = distinct !{!5, !6, !7} +!6 = !{!"llvm.loop.mustprogress"} +!7 = !{!"llvm.loop.unroll.disable"} diff --git a/test/fir.ll b/test/fir.ll new file mode 100644 index 0000000..c3305f3 --- /dev/null +++ b/test/fir.ll @@ -0,0 +1,163 @@ +; ModuleID = 'fir.c' +source_filename = "fir.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +@fir_int = dso_local local_unnamed_addr global [36 x i64] [i64 4294967294, i64 1, i64 4, i64 3, i64 4294967294, i64 4294967292, i64 2, i64 7, i64 0, i64 4294967287, i64 4294967292, i64 12, i64 11, i64 4294967282, i64 4294967270, i64 15, i64 89, i64 127, i64 89, i64 15, i64 4294967270, i64 4294967282, i64 11, i64 12, i64 4294967292, i64 4294967287, i64 0, i64 7, i64 2, i64 4294967292, i64 4294967294, i64 3, i64 4, i64 1, i64 4294967294, i64 0], align 16 +@in_data = dso_local global [701 x i64] [i64 0, i64 0, i64 0, i64 0, i64 127, i64 121, i64 114, i64 121, i64 13, i64 13, i64 0, i64 3, i64 5, i64 2, i64 3, i64 127, i64 127, i64 2, i64 126, i64 0, i64 1, i64 126, i64 1, i64 1, i64 127, i64 0, i64 127, i64 0, i64 2, i64 1, i64 1, i64 3, i64 1, i64 127, i64 1, i64 0, i64 1, i64 1, i64 125, i64 123, i64 115, i64 106, i64 119, i64 16, i64 14, i64 1, i64 5, i64 5, i64 5, i64 5, i64 125, i64 0, i64 2, i64 125, i64 0, i64 0, i64 126, i64 1, i64 126, i64 127, i64 3, i64 124, i64 126, i64 6, i64 0, i64 126, i64 3, i64 2, i64 127, i64 126, i64 127, i64 2, i64 1, i64 127, i64 1, i64 1, i64 0, i64 3, i64 0, i64 127, i64 2, i64 0, i64 127, i64 3, i64 1, i64 0, i64 0, i64 125, i64 0, i64 3, i64 
0, i64 126, i64 127, i64 2, i64 1, i64 126, i64 0, i64 3, i64 127, i64 125, i64 1, i64 1, i64 1, i64 127, i64 0, i64 5, i64 0, i64 127, i64 2, i64 126, i64 127, i64 2, i64 1, i64 0, i64 126, i64 0, i64 5, i64 0, i64 127, i64 0, i64 126, i64 1, i64 0, i64 125, i64 1, i64 3, i64 127, i64 0, i64 0, i64 126, i64 2, i64 3, i64 126, i64 125, i64 114, i64 104, i64 113, i64 5, i64 12, i64 7, i64 2, i64 6, i64 13, i64 5, i64 125, i64 3, i64 2, i64 127, i64 0, i64 121, i64 122, i64 3, i64 126, i64 125, i64 0, i64 125, i64 2, i64 1, i64 125, i64 8, i64 3, i64 124, i64 6, i64 0, i64 122, i64 6, i64 2, i64 124, i64 3, i64 126, i64 121, i64 6, i64 5, i64 116, i64 127, i64 13, i64 122, i64 120, i64 6, i64 5, i64 1, i64 0, i64 125, i64 1, i64 4, i64 124, i64 127, i64 3, i64 127, i64 5, i64 3, i64 122, i64 6, i64 10, i64 118, i64 124, i64 10, i64 124, i64 127, i64 6, i64 121, i64 3, i64 12, i64 117, i64 120, i64 10, i64 0, i64 121, i64 3, i64 126, i64 124, i64 6, i64 0, i64 121, i64 2, i64 126, i64 127, i64 6, i64 118, i64 127, i64 13, i64 121, i64 127, i64 6, i64 121, i64 6, i64 3, i64 113, i64 6, i64 10, i64 115, i64 127, i64 10, i64 0, i64 127, i64 122, i64 124, i64 10, i64 0, i64 117, i64 127, i64 12, i64 10, i64 124, i64 121, i64 9, i64 13, i64 125, i64 122, i64 5, i64 11, i64 10, i64 121, i64 124, i64 22, i64 3, i64 114, i64 13, i64 7, i64 121, i64 12, i64 7, i64 122, i64 11, i64 7, i64 122, i64 10, i64 7, i64 121, i64 10, i64 5, i64 117, i64 6, i64 5, i64 121, i64 5, i64 6, i64 1, i64 6, i64 0, i64 122, i64 2, i64 7, i64 3, i64 125, i64 1, i64 10, i64 7, i64 2, i64 127, i64 127, i64 9, i64 7, i64 121, i64 121, i64 6, i64 8, i64 125, i64 122, i64 6, i64 12, i64 6, i64 125, i64 127, i64 13, i64 7, i64 121, i64 1, i64 6, i64 127, i64 127, i64 2, i64 3, i64 1, i64 126, i64 1, i64 1, i64 125, i64 1, i64 0, i64 125, i64 6, i64 3, i64 125, i64 5, i64 7, i64 127, i64 124, i64 1, i64 6, i64 6, i64 124, i64 122, i64 7, i64 10, i64 0, i64 120, i64 1, i64 8, i64 0, i64 121, i64 122, i64 4, i64 10, i64 0, i64 120, i64 1, i64 6, i64 122, i64 117, i64 122, i64 0, i64 0, i64 121, i64 118, i64 127, i64 7, i64 0, i64 122, i64 125, i64 2, i64 4, i64 124, i64 122, i64 2, i64 5, i64 124, i64 122, i64 125, i64 127, i64 0, i64 120, i64 117, i64 127, i64 0, i64 121, i64 120, i64 121, i64 1, i64 3, i64 121, i64 121, i64 0, i64 0, i64 127, i64 127, i64 121, i64 127, i64 2, i64 122, i64 124, i64 125, i64 124, i64 127, i64 125, i64 121, i64 125, i64 0, i64 121, i64 122, i64 124, i64 125, i64 0, i64 125, i64 125, i64 0, i64 0, i64 0, i64 0, i64 125, i64 125, i64 0, i64 125, i64 126, i64 0, i64 126, i64 3, i64 3, i64 125, i64 1, i64 5, i64 0, i64 126, i64 125, i64 127, i64 3, i64 125, i64 121, i64 1, i64 2, i64 125, i64 127, i64 1, i64 0, i64 0, i64 127, i64 127, i64 126, i64 127, i64 0, i64 127, i64 0, i64 124, i64 125, i64 0, i64 121, i64 120, i64 124, i64 124, i64 123, i64 123, i64 125, i64 127, i64 0, i64 0, i64 127, i64 0, i64 1, i64 2, i64 0, i64 127, i64 0, i64 0, i64 0, i64 127, i64 126, i64 0, i64 0, i64 127, i64 0, i64 2, i64 1, i64 2, i64 6, i64 5, i64 3, i64 6, i64 8, i64 5, i64 2, i64 1, i64 1, i64 3, i64 0, i64 125, i64 127, i64 0, i64 127, i64 126, i64 0, i64 2, i64 3, i64 2, i64 1, i64 2, i64 3, i64 1, i64 124, i64 125, i64 0, i64 0, i64 126, i64 124, i64 127, i64 1, i64 0, i64 126, i64 124, i64 127, i64 1, i64 0, i64 126, i64 127, i64 2, i64 3, i64 1, i64 0, i64 4, i64 6, i64 5, i64 6, i64 7, i64 10, i64 10, i64 4, i64 2, i64 5, i64 8, i64 9, i64 8, i64 7, i64 12, i64 20, i64 20, i64 16, i64 14, i64 20, i64 21, 
i64 15, i64 9, i64 7, i64 4, i64 126, i64 118, i64 100, i64 65, i64 72, i64 125, i64 108, i64 61, i64 103, i64 16, i64 6, i64 125, i64 117, i64 7, i64 29, i64 0, i64 108, i64 2, i64 125, i64 120, i64 119, i64 111, i64 119, i64 1, i64 0, i64 2, i64 7, i64 10, i64 28, i64 28, i64 23, i64 35, i64 47, i64 65, i64 67, i64 79, i64 85, i64 88, i64 126, i64 2, i64 76, i64 16, i64 105, i64 44, i64 13, i64 116, i64 42, i64 116, i64 99, i64 41, i64 124, i64 94, i64 33, i64 53, i64 70, i64 36, i64 103, i64 53, i64 60, i64 60, i64 38, i64 38, i64 47, i64 71, i64 100, i64 4, i64 19, i64 24, i64 39, i64 43, i64 48, i64 27, i64 127, i64 120, i64 114, i64 104, i64 92, i64 90, i64 104, i64 124, i64 3, i64 13, i64 38, i64 65, i64 81, i64 90, i64 106, i64 108, i64 84, i64 120, i64 9, i64 69, i64 121, i64 31, i64 11, i64 46, i64 96, i64 11, i64 102, i64 127, i64 104, i64 119, i64 78, i64 70, i64 74, i64 59, i64 18, i64 91, i64 55, i64 49, i64 33, i64 11, i64 18, i64 46, i64 87, i64 126, i64 25, i64 34, i64 43, i64 63, i64 58, i64 37, i64 11, i64 121, i64 113, i64 104, i64 97, i64 92, i64 102, i64 114, i64 6, i64 22, i64 41, i64 65, i64 94, i64 109, i64 102, i64 96, i64 110, i64 23, i64 72, i64 54, i64 18, i64 23, i64 47, i64 99, i64 120, i64 92, i64 119, i64 108, i64 117, i64 65, i64 73, i64 79, i64 59, i64 11, i64 84, i64 55, i64 0], align 16 +@out_data = dso_local local_unnamed_addr global <{ [700 x i64], [20 x i64] }> <{ [700 x i64] [i64 3, i64 4294967290, i64 4294967293, i64 29, i64 88, i64 137, i64 135, i64 86, i64 32, i64 7, i64 7, i64 4, i64 4294967289, i64 0, i64 40, i64 91, i64 107, i64 79, i64 43, i64 33, i64 45, i64 48, i64 39, i64 39, i64 55, i64 71, i64 66, i64 39, i64 8, i64 4294967284, i64 4294967285, i64 13, i64 46, i64 59, i64 37, i64 0, i64 4294967288, i64 29, i64 89, i64 131, i64 135, i64 111, i64 78, i64 47, i64 18, i64 4294967295, i64 4294967291, i64 4, i64 21, i64 35, i64 45, i64 49, i64 47, i64 41, i64 38, i64 42, i64 54, i64 72, i64 88, i64 95, i64 90, i64 79, i64 70, i64 65, i64 50, i64 27, i64 23, i64 55, i64 105, i64 123, i64 89, i64 47, i64 36, i64 48, i64 42, i64 8, i64 4294967286, i64 7, i64 36, i64 49, i64 47, i64 51, i64 50, i64 30, i64 4, i64 7, i64 35, i64 51, i64 33, i64 14, i64 30, i64 74, i64 97, i64 75, i64 33, i64 14, i64 34, i64 73, i64 94, i64 77, i64 37, i64 11, i64 24, i64 50, i64 51, i64 21, i64 5, i64 41, i64 100, i64 118, i64 77, i64 22, i64 9, i64 38, i64 55, i64 35, i64 11, i64 21, i64 60, i64 82, i64 64, i64 35, i64 29, i64 45, i64 54, i64 45, i64 36, i64 41, i64 50, i64 44, i64 33, i64 43, i64 80, i64 123, i64 141, i64 115, i64 71, i64 34, i64 15, i64 7, i64 4294967295, i64 0, i64 19, i64 45, i64 54, i64 43, i64 35, i64 50, i64 78, i64 92, i64 85, i64 79, i64 85, i64 92, i64 80, i64 52, i64 32, i64 34, i64 50, i64 56, i64 47, i64 37, i64 42, i64 53, i64 50, i64 35, i64 31, i64 54, i64 87, i64 96, i64 76, i64 49, i64 45, i64 64, i64 87, i64 103, i64 103, i64 76, i64 33, i64 4, i64 8, i64 32, i64 48, i64 44, i64 51, i64 78, i64 97, i64 86, i64 57, i64 38, i64 38, i64 43, i64 46, i64 56, i64 74, i64 87, i64 88, i64 92, i64 95, i64 80, i64 49, i64 29, i64 49, i64 88, i64 93, i64 55, i64 22, i64 35, i64 85, i64 113, i64 86, i64 40, i64 24, i64 48, i64 81, i64 96, i64 92, i64 82, i64 79, i64 84, i64 94, i64 98, i64 87, i64 69, i64 58, i64 53, i64 38, i64 23, i64 35, i64 71, i64 93, i64 72, i64 39, i64 48, i64 97, i64 121, i64 90, i64 49, i64 45, i64 69, i64 79, i64 65, i64 62, i64 72, i64 72, i64 58, i64 61, i64 83, i64 85, i64 47, i64 13, i64 31, i64 85, i64 105, 
i64 71, i64 30, i64 28, i64 50, i64 60, i64 49, i64 40, i64 45, i64 52, i64 50, i64 46, i64 46, i64 47, i64 45, i64 47, i64 50, i64 47, i64 38, i64 35, i64 48, i64 61, i64 44, i64 3, i64 4294967279, i64 10, i64 52, i64 57, i64 24, i64 10, i64 40, i64 66, i64 40, i64 4294967291, i64 4294967294, i64 55, i64 97, i64 83, i64 50, i64 53, i64 75, i64 76, i64 54, i64 54, i64 78, i64 86, i64 51, i64 14, i64 27, i64 78, i64 105, i64 81, i64 34, i64 13, i64 36, i64 75, i64 94, i64 77, i64 42, i64 18, i64 22, i64 41, i64 53, i64 51, i64 42, i64 37, i64 38, i64 47, i64 56, i64 49, i64 29, i64 29, i64 66, i64 104, i64 88, i64 26, i64 4294967295, i64 42, i64 99, i64 95, i64 39, i64 10, i64 34, i64 52, i64 30, i64 11, i64 39, i64 88, i64 90, i64 46, i64 16, i64 27, i64 40, i64 35, i64 49, i64 96, i64 124, i64 86, i64 27, i64 29, i64 93, i64 129, i64 92, i64 41, i64 44, i64 78, i64 81, i64 53, i64 51, i64 77, i64 83, i64 50, i64 36, i64 80, i64 134, i64 133, i64 90, i64 70, i64 93, i64 107, i64 91, i64 79, i64 99, i64 113, i64 84, i64 42, i64 44, i64 80, i64 86, i64 48, i64 30, i64 77, i64 141, i64 144, i64 91, i64 58, i64 85, i64 128, i64 137, i64 120, i64 123, i64 138, i64 125, i64 83, i64 62, i64 91, i64 131, i64 127, i64 89, i64 74, i64 91, i64 94, i64 46, i64 4294967284, i64 4294967283, i64 45, i64 95, i64 97, i64 80, i64 84, i64 94, i64 80, i64 48, i64 38, i64 52, i64 50, i64 24, i64 9, i64 39, i64 91, i64 116, i64 109, i64 94, i64 82, i64 64, i64 45, i64 52, i64 84, i64 92, i64 49, i64 0, i64 10, i64 86, i64 156, i64 150, i64 89, i64 46, i64 56, i64 87, i64 94, i64 75, i64 70, i64 94, i64 120, i64 124, i64 119, i64 128, i64 141, i64 125, i64 79, i64 43, i64 43, i64 51, i64 30, i64 0, i64 6, i64 40, i64 55, i64 29, i64 9, i64 36, i64 83, i64 93, i64 61, i64 31, i64 33, i64 41, i64 24, i64 4294967292, i64 4294967285, i64 6, i64 18, i64 9, i64 4294967293, i64 1, i64 15, i64 12, i64 4294967290, i64 4294967282, i64 9, i64 50, i64 77, i64 86, i64 92, i64 98, i64 83, i64 39, i64 0, i64 4294967292, i64 12, i64 8, i64 4294967280, i64 4294967289, i64 54, i64 106, i64 85, i64 28, i64 27, i64 96, i64 142, i64 97, i64 21, i64 20, i64 94, i64 140, i64 97, i64 29, i64 26, i64 82, i64 107, i64 61, i64 4294967291, i64 4294967272, i64 1, i64 21, i64 12, i64 4294967294, i64 0, i64 13, i64 17, i64 9, i64 1, i64 1, i64 7, i64 12, i64 11, i64 7, i64 6, i64 13, i64 22, i64 23, i64 16, i64 12, i64 19, i64 28, i64 19, i64 0, i64 0, i64 38, i64 95, i64 123, i64 104, i64 72, i64 72, i64 104, i64 125, i64 96, i64 45, i64 25, i64 55, i64 92, i64 90, i64 49, i64 15, i64 19, i64 49, i64 76, i64 94, i64 113, i64 131, i64 127, i64 88, i64 32, i64 4294967293, i64 4294967288, i64 9, i64 24, i64 25, i64 22, i64 25, i64 39, i64 55, i64 61, i64 66, i64 79, i64 98, i64 101, i64 79, i64 51, i64 44, i64 54, i64 61, i64 56, i64 52, i64 64, i64 84, i64 93, i64 91, i64 88, i64 89, i64 83, i64 65, i64 50, i64 51, i64 63, i64 74, i64 75, i64 67, i64 51, i64 37, i64 40, i64 61, i64 79, i64 68, i64 35, i64 14, i64 22, i64 41, i64 44, i64 40, i64 57, i64 99, i64 132, i64 125, i64 95, i64 86, i64 105, i64 115, i64 86, i64 36, i64 12, i64 30, i64 64, i64 86, i64 96, i64 105, i64 110, i64 99, i64 78, i64 66, i64 68, i64 71, i64 59, i64 42, i64 34, i64 45, i64 69, i64 93, i64 112, i64 119, i64 109, i64 91, i64 74, i64 63, i64 55, i64 50, i64 57, i64 67, i64 61, i64 32, i64 5, i64 16, i64 63, i64 100, i64 90, i64 52, i64 33, i64 46, i64 62, i64 51, i64 34, i64 48, i64 89, i64 117, i64 113, i64 96, i64 97, i64 103, i64 85, i64 45, i64 18, i64 29, i64 
67, i64 101, i64 113, i64 108, i64 95, i64 83, i64 71, i64 57, i64 41, i64 28, i64 30, i64 53, i64 86, i64 111, i64 116, i64 111, i64 106, i64 102, i64 92, i64 75, i64 58, i64 51, i64 54, i64 56, i64 44], [20 x i64] zeroinitializer }>, align 16 + +; Function Attrs: nofree nosync nounwind sspstrong uwtable +define dso_local i32 @main() local_unnamed_addr #0 { + %1 = alloca [720 x i64], align 16 + %2 = bitcast [720 x i64]* %1 to i8* + call void @llvm.lifetime.start.p0i8(i64 5760, i8* nonnull %2) #3 + %3 = getelementptr inbounds [720 x i64], [720 x i64]* %1, i64 0, i64 0 + br label %4 + +4: ; preds = %28, %0 + %5 = phi i64* [ %33, %28 ], [ %3, %0 ] + %6 = phi i64* [ %42, %28 ], [ getelementptr inbounds ([36 x i64], [36 x i64]* @fir_int, i64 0, i64 0), %0 ] + %7 = phi i64* [ %41, %28 ], [ getelementptr inbounds ([701 x i64], [701 x i64]* @in_data, i64 0, i64 17), %0 ] + %8 = phi i64 [ %38, %28 ], [ 18, %0 ] + %9 = phi i64 [ %43, %28 ], [ 0, %0 ] + %10 = getelementptr inbounds i64, i64* %6, i64 1 + %11 = load i64, i64* %6, align 8, !tbaa !5 + %12 = load i64, i64* %7, align 8, !tbaa !5 + %13 = mul nsw i64 %12, %11 + %14 = icmp sgt i64 %8, 1 + br i1 %14, label %15, label %28 + +15: ; preds = %4, %15 + %16 = phi i64* [ %20, %15 ], [ %7, %4 ] + %17 = phi i64* [ %21, %15 ], [ %10, %4 ] + %18 = phi i64 [ %25, %15 ], [ %13, %4 ] + %19 = phi i64 [ %26, %15 ], [ 1, %4 ] + %20 = getelementptr inbounds i64, i64* %16, i64 -1 + %21 = getelementptr inbounds i64, i64* %17, i64 1 + %22 = load i64, i64* %17, align 8, !tbaa !5 + %23 = load i64, i64* %20, align 8, !tbaa !5 + %24 = mul nsw i64 %23, %22 + %25 = add nsw i64 %24, %18 + %26 = add nuw nsw i64 %19, 1 + %27 = icmp eq i64 %26, %8 + br i1 %27, label %28, label %15, !llvm.loop !9 + +28: ; preds = %15, %4 + %29 = phi i64 [ %13, %4 ], [ %25, %15 ] + %30 = sdiv i64 %29, 285 + %31 = shl i64 %30, 32 + %32 = ashr exact i64 %31, 32 + %33 = getelementptr inbounds i64, i64* %5, i64 1 + store i64 %32, i64* %5, align 8, !tbaa !5 + %34 = icmp eq i64* %7, getelementptr inbounds ([701 x i64], [701 x i64]* @in_data, i64 0, i64 699) + %35 = icmp slt i64 %8, 35 + %36 = zext i1 %35 to i64 + %37 = select i1 %34, i64 -1, i64 %36 + %38 = add nsw i64 %37, %8 + %39 = xor i1 %34, true + %40 = zext i1 %39 to i64 + %41 = getelementptr i64, i64* %7, i64 %40 + %42 = select i1 %34, i64* %10, i64* %6 + %43 = add nuw nsw i64 %9, 1 + %44 = icmp eq i64 %43, 700 + br i1 %44, label %45, label %4, !llvm.loop !12 + +45: ; preds = %28 + call void @llvm.lifetime.end.p0i8(i64 5760, i8* nonnull %2) #3 + ret i32 0 +} + +; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn +declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) #1 + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local void @fir_filter_int(i64* readonly %0, i64* nocapture %1, i64 %2, i64* nocapture readonly %3, i64 %4, i64 %5) local_unnamed_addr #2 { + %7 = add nsw i64 %4, 1 + %8 = ashr i64 %7, 1 + %9 = add nsw i64 %2, -1 + %10 = getelementptr inbounds i64, i64* %0, i64 %9 + %11 = icmp sgt i64 %2, 0 + br i1 %11, label %12, label %56 + +12: ; preds = %6 + %13 = add nsw i64 %8, -1 + %14 = getelementptr inbounds i64, i64* %0, i64 %13 + br label %15 + +15: ; preds = %12, %39 + %16 = phi i64* [ %44, %39 ], [ %1, %12 ] + %17 = phi i64* [ %53, %39 ], [ %3, %12 ] + %18 = phi i64* [ %52, %39 ], [ %14, %12 ] + %19 = phi i64 [ %49, %39 ], [ %8, %12 ] + %20 = phi i64 [ %54, %39 ], [ 0, %12 ] + %21 = getelementptr inbounds i64, i64* %17, i64 1 + %22 = load i64, i64* %17, align 
8, !tbaa !5 + %23 = load i64, i64* %18, align 8, !tbaa !5 + %24 = mul nsw i64 %23, %22 + %25 = icmp sgt i64 %19, 1 + br i1 %25, label %26, label %39 + +26: ; preds = %15, %26 + %27 = phi i64* [ %31, %26 ], [ %18, %15 ] + %28 = phi i64* [ %32, %26 ], [ %21, %15 ] + %29 = phi i64 [ %36, %26 ], [ %24, %15 ] + %30 = phi i64 [ %37, %26 ], [ 1, %15 ] + %31 = getelementptr inbounds i64, i64* %27, i64 -1 + %32 = getelementptr inbounds i64, i64* %28, i64 1 + %33 = load i64, i64* %28, align 8, !tbaa !5 + %34 = load i64, i64* %31, align 8, !tbaa !5 + %35 = mul nsw i64 %34, %33 + %36 = add nsw i64 %35, %29 + %37 = add nuw nsw i64 %30, 1 + %38 = icmp eq i64 %37, %19 + br i1 %38, label %39, label %26, !llvm.loop !9 + +39: ; preds = %26, %15 + %40 = phi i64 [ %24, %15 ], [ %36, %26 ] + %41 = sdiv i64 %40, %5 + %42 = shl i64 %41, 32 + %43 = ashr exact i64 %42, 32 + %44 = getelementptr inbounds i64, i64* %16, i64 1 + store i64 %43, i64* %16, align 8, !tbaa !5 + %45 = icmp eq i64* %18, %10 + %46 = icmp slt i64 %19, %4 + %47 = zext i1 %46 to i64 + %48 = select i1 %45, i64 -1, i64 %47 + %49 = add nsw i64 %48, %19 + %50 = xor i1 %45, true + %51 = zext i1 %50 to i64 + %52 = getelementptr i64, i64* %18, i64 %51 + %53 = select i1 %45, i64* %21, i64* %17 + %54 = add nuw nsw i64 %20, 1 + %55 = icmp eq i64 %54, %2 + br i1 %55, label %56, label %15, !llvm.loop !12 + +56: ; preds = %39, %6 + ret void +} + +; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn +declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture) #1 + +attributes #0 = { nofree nosync nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #1 = { argmemonly mustprogress nofree nosync nounwind willreturn } +attributes #2 = { nofree norecurse nosync nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #3 = { nounwind } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = !{!6, !6, i64 0} +!6 = !{!"long", !7, i64 0} +!7 = !{!"omnipotent char", !8, i64 0} +!8 = !{!"Simple C/C++ TBAA"} +!9 = distinct !{!9, !10, !11} +!10 = !{!"llvm.loop.mustprogress"} +!11 = !{!"llvm.loop.unroll.disable"} +!12 = distinct !{!12, !10, !11} diff --git a/test/insertsort.ll b/test/insertsort.ll new file mode 100644 index 0000000..2a7244b --- /dev/null +++ b/test/insertsort.ll @@ -0,0 +1,77 @@ +; ModuleID = 'insertsort.c' +source_filename = "insertsort.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +@a = dso_local local_unnamed_addr global [11 x i32] zeroinitializer, align 16 + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local i32 @main() local_unnamed_addr #0 { + store i32 0, i32* getelementptr inbounds ([11 x i32], [11 x i32]* @a, i64 0, i64 0), align 16, !tbaa !5 + store i32 11, i32* getelementptr inbounds ([11 x i32], [11 x i32]* @a, i64 0, i64 1), align 4, !tbaa !5 + store i32 10, i32* getelementptr inbounds ([11 x i32], [11 x i32]* @a, i64 0, i64 
2), align 8, !tbaa !5 + store i32 9, i32* getelementptr inbounds ([11 x i32], [11 x i32]* @a, i64 0, i64 3), align 4, !tbaa !5 + store i32 8, i32* getelementptr inbounds ([11 x i32], [11 x i32]* @a, i64 0, i64 4), align 16, !tbaa !5 + store i32 7, i32* getelementptr inbounds ([11 x i32], [11 x i32]* @a, i64 0, i64 5), align 4, !tbaa !5 + store i32 6, i32* getelementptr inbounds ([11 x i32], [11 x i32]* @a, i64 0, i64 6), align 8, !tbaa !5 + store i32 5, i32* getelementptr inbounds ([11 x i32], [11 x i32]* @a, i64 0, i64 7), align 4, !tbaa !5 + store i32 4, i32* getelementptr inbounds ([11 x i32], [11 x i32]* @a, i64 0, i64 8), align 16, !tbaa !5 + store i32 3, i32* getelementptr inbounds ([11 x i32], [11 x i32]* @a, i64 0, i64 9), align 4, !tbaa !5 + store i32 2, i32* getelementptr inbounds ([11 x i32], [11 x i32]* @a, i64 0, i64 10), align 8, !tbaa !5 + br label %1 + +1: ; preds = %0, %22 + %2 = phi i64 [ 2, %0 ], [ %23, %22 ] + %3 = phi i64 [ 1, %0 ], [ %24, %22 ] + %4 = getelementptr inbounds [11 x i32], [11 x i32]* @a, i64 0, i64 %2 + %5 = load i32, i32* %4, align 4, !tbaa !5 + %6 = add nsw i64 %2, -1 + %7 = getelementptr inbounds [11 x i32], [11 x i32]* @a, i64 0, i64 %6 + %8 = load i32, i32* %7, align 4, !tbaa !5 + %9 = icmp ult i32 %5, %8 + br i1 %9, label %10, label %22 + +10: ; preds = %1, %10 + %11 = phi i64 [ %18, %10 ], [ %3, %1 ] + %12 = phi i32 [ %20, %10 ], [ %8, %1 ] + %13 = phi i32* [ %19, %10 ], [ %7, %1 ] + %14 = phi i32 [ %17, %10 ], [ %5, %1 ] + %15 = phi i32* [ %16, %10 ], [ %4, %1 ] + store i32 %12, i32* %15, align 4, !tbaa !5 + store i32 %14, i32* %13, align 4, !tbaa !5 + %16 = getelementptr inbounds [11 x i32], [11 x i32]* @a, i64 0, i64 %11 + %17 = load i32, i32* %16, align 4, !tbaa !5 + %18 = add nsw i64 %11, -1 + %19 = getelementptr inbounds [11 x i32], [11 x i32]* @a, i64 0, i64 %18 + %20 = load i32, i32* %19, align 4, !tbaa !5 + %21 = icmp ult i32 %17, %20 + br i1 %21, label %10, label %22, !llvm.loop !9 + +22: ; preds = %10, %1 + %23 = add nuw nsw i64 %2, 1 + %24 = add nuw nsw i64 %3, 1 + %25 = icmp eq i64 %23, 11 + br i1 %25, label %26, label %1, !llvm.loop !12 + +26: ; preds = %22 + ret i32 1 +} + +attributes #0 = { nofree norecurse nosync nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = !{!6, !6, i64 0} +!6 = !{!"int", !7, i64 0} +!7 = !{!"omnipotent char", !8, i64 0} +!8 = !{!"Simple C/C++ TBAA"} +!9 = distinct !{!9, !10, !11} +!10 = !{!"llvm.loop.mustprogress"} +!11 = !{!"llvm.loop.unroll.disable"} +!12 = distinct !{!12, !10, !11} diff --git a/test/janne_complex.ll b/test/janne_complex.ll new file mode 100644 index 0000000..3fb029c --- /dev/null +++ b/test/janne_complex.ll @@ -0,0 +1,26 @@ +; ModuleID = 'janne_complex.c' +source_filename = "janne_complex.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +; Function Attrs: nofree norecurse nosync nounwind readnone sspstrong uwtable +define dso_local i32 @complex(i32 %0, i32 %1) local_unnamed_addr #0 { + ret i32 1 +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone 
sspstrong uwtable willreturn +define dso_local i32 @main() local_unnamed_addr #1 { + ret i32 1 +} + +attributes #0 = { nofree norecurse nosync nounwind readnone sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #1 = { mustprogress nofree norecurse nosync nounwind readnone sspstrong uwtable willreturn "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} diff --git a/test/jfdctint.ll b/test/jfdctint.ll new file mode 100644 index 0000000..9cdc1b9 --- /dev/null +++ b/test/jfdctint.ll @@ -0,0 +1,235 @@ +; ModuleID = 'jfdctint.c' +source_filename = "jfdctint.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +@data = dso_local local_unnamed_addr global [64 x i32] zeroinitializer, align 16 + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local void @jpeg_fdct_islow() local_unnamed_addr #0 { + br label %1 + +1: ; preds = %0, %1 + %2 = phi i32 [ 7, %0 ], [ %78, %1 ] + %3 = phi i32* [ getelementptr inbounds ([64 x i32], [64 x i32]* @data, i64 0, i64 0), %0 ], [ %77, %1 ] + %4 = load i32, i32* %3, align 4, !tbaa !5 + %5 = getelementptr inbounds i32, i32* %3, i64 7 + %6 = load i32, i32* %5, align 4, !tbaa !5 + %7 = add nsw i32 %6, %4 + %8 = sub nsw i32 %4, %6 + %9 = getelementptr inbounds i32, i32* %3, i64 1 + %10 = load i32, i32* %9, align 4, !tbaa !5 + %11 = getelementptr inbounds i32, i32* %3, i64 6 + %12 = load i32, i32* %11, align 4, !tbaa !5 + %13 = add nsw i32 %12, %10 + %14 = sub nsw i32 %10, %12 + %15 = getelementptr inbounds i32, i32* %3, i64 2 + %16 = load i32, i32* %15, align 4, !tbaa !5 + %17 = getelementptr inbounds i32, i32* %3, i64 5 + %18 = load i32, i32* %17, align 4, !tbaa !5 + %19 = add nsw i32 %18, %16 + %20 = sub nsw i32 %16, %18 + %21 = getelementptr inbounds i32, i32* %3, i64 3 + %22 = load i32, i32* %21, align 4, !tbaa !5 + %23 = getelementptr inbounds i32, i32* %3, i64 4 + %24 = load i32, i32* %23, align 4, !tbaa !5 + %25 = add nsw i32 %24, %22 + %26 = sub nsw i32 %22, %24 + %27 = add nsw i32 %25, %7 + %28 = sub nsw i32 %7, %25 + %29 = add nsw i32 %19, %13 + %30 = sub nsw i32 %13, %19 + %31 = add nsw i32 %27, %29 + %32 = shl i32 %31, 2 + store i32 %32, i32* %3, align 4, !tbaa !5 + %33 = sub nsw i32 %27, %29 + %34 = shl i32 %33, 2 + store i32 %34, i32* %23, align 4, !tbaa !5 + %35 = add nsw i32 %28, %30 + %36 = mul nsw i32 %35, 4433 + %37 = mul nsw i32 %28, 6270 + %38 = add i32 %36, 1024 + %39 = add i32 %38, %37 + %40 = ashr i32 %39, 11 + store i32 %40, i32* %15, align 4, !tbaa !5 + %41 = mul nsw i32 %30, -15137 + %42 = add i32 %36, 1024 + %43 = add i32 %42, %41 + %44 = ashr i32 %43, 11 + store i32 %44, i32* %11, align 4, !tbaa !5 + %45 = add nsw i32 %26, %8 + %46 = add nsw i32 %20, %14 + %47 = add nsw i32 %26, %14 + %48 = add nsw i32 %20, %8 + %49 = add nsw i32 %47, %48 + %50 = mul nsw i32 %49, 9633 + %51 = mul nsw i32 %26, 2446 + %52 = mul nsw i32 %20, 16819 + %53 = mul nsw i32 
%14, 25172 + %54 = mul nsw i32 %8, 12299 + %55 = mul nsw i32 %45, -7373 + %56 = mul nsw i32 %46, -20995 + %57 = mul nsw i32 %47, -16069 + %58 = mul nsw i32 %48, -3196 + %59 = add nsw i32 %50, %57 + %60 = add nsw i32 %50, %58 + %61 = add i32 %55, 1024 + %62 = add i32 %61, %51 + %63 = add i32 %62, %59 + %64 = ashr i32 %63, 11 + store i32 %64, i32* %5, align 4, !tbaa !5 + %65 = add i32 %56, 1024 + %66 = add i32 %65, %52 + %67 = add i32 %66, %60 + %68 = ashr i32 %67, 11 + store i32 %68, i32* %17, align 4, !tbaa !5 + %69 = add i32 %56, 1024 + %70 = add i32 %69, %53 + %71 = add i32 %70, %59 + %72 = ashr i32 %71, 11 + store i32 %72, i32* %21, align 4, !tbaa !5 + %73 = add i32 %55, 1024 + %74 = add i32 %73, %54 + %75 = add i32 %74, %60 + %76 = ashr i32 %75, 11 + store i32 %76, i32* %9, align 4, !tbaa !5 + %77 = getelementptr inbounds i32, i32* %3, i64 8 + %78 = add nsw i32 %2, -1 + %79 = icmp eq i32 %2, 0 + br i1 %79, label %80, label %1, !llvm.loop !9 + +80: ; preds = %1, %80 + %81 = phi i32 [ %159, %80 ], [ 7, %1 ] + %82 = phi i32* [ %158, %80 ], [ getelementptr inbounds ([64 x i32], [64 x i32]* @data, i64 0, i64 0), %1 ] + %83 = load i32, i32* %82, align 4, !tbaa !5 + %84 = getelementptr inbounds i32, i32* %82, i64 56 + %85 = load i32, i32* %84, align 4, !tbaa !5 + %86 = add nsw i32 %85, %83 + %87 = sub nsw i32 %83, %85 + %88 = getelementptr inbounds i32, i32* %82, i64 8 + %89 = load i32, i32* %88, align 4, !tbaa !5 + %90 = getelementptr inbounds i32, i32* %82, i64 48 + %91 = load i32, i32* %90, align 4, !tbaa !5 + %92 = add nsw i32 %91, %89 + %93 = sub nsw i32 %89, %91 + %94 = getelementptr inbounds i32, i32* %82, i64 16 + %95 = load i32, i32* %94, align 4, !tbaa !5 + %96 = getelementptr inbounds i32, i32* %82, i64 40 + %97 = load i32, i32* %96, align 4, !tbaa !5 + %98 = add nsw i32 %97, %95 + %99 = sub nsw i32 %95, %97 + %100 = getelementptr inbounds i32, i32* %82, i64 24 + %101 = load i32, i32* %100, align 4, !tbaa !5 + %102 = getelementptr inbounds i32, i32* %82, i64 32 + %103 = load i32, i32* %102, align 4, !tbaa !5 + %104 = add nsw i32 %103, %101 + %105 = sub nsw i32 %101, %103 + %106 = add nsw i32 %104, %86 + %107 = sub nsw i32 %86, %104 + %108 = add nsw i32 %98, %92 + %109 = sub nsw i32 %92, %98 + %110 = add i32 %108, 2 + %111 = add i32 %110, %106 + %112 = ashr i32 %111, 2 + store i32 %112, i32* %82, align 4, !tbaa !5 + %113 = sub i32 2, %108 + %114 = add i32 %113, %106 + %115 = ashr i32 %114, 2 + store i32 %115, i32* %102, align 4, !tbaa !5 + %116 = add nsw i32 %107, %109 + %117 = mul nsw i32 %116, 4433 + %118 = mul nsw i32 %107, 6270 + %119 = add i32 %117, 16384 + %120 = add i32 %119, %118 + %121 = ashr i32 %120, 15 + store i32 %121, i32* %94, align 4, !tbaa !5 + %122 = mul nsw i32 %109, -15137 + %123 = add i32 %117, 16384 + %124 = add i32 %123, %122 + %125 = ashr i32 %124, 15 + store i32 %125, i32* %90, align 4, !tbaa !5 + %126 = add nsw i32 %105, %87 + %127 = add nsw i32 %99, %93 + %128 = add nsw i32 %105, %93 + %129 = add nsw i32 %99, %87 + %130 = add nsw i32 %128, %129 + %131 = mul nsw i32 %130, 9633 + %132 = mul nsw i32 %105, 2446 + %133 = mul nsw i32 %99, 16819 + %134 = mul nsw i32 %93, 25172 + %135 = mul nsw i32 %87, 12299 + %136 = mul nsw i32 %126, -7373 + %137 = mul nsw i32 %127, -20995 + %138 = mul nsw i32 %128, -16069 + %139 = mul nsw i32 %129, -3196 + %140 = add nsw i32 %131, %138 + %141 = add nsw i32 %131, %139 + %142 = add i32 %136, 16384 + %143 = add i32 %142, %132 + %144 = add i32 %143, %140 + %145 = ashr i32 %144, 15 + store i32 %145, i32* %84, align 4, !tbaa !5 + 
%146 = add i32 %137, 16384 + %147 = add i32 %146, %133 + %148 = add i32 %147, %141 + %149 = ashr i32 %148, 15 + store i32 %149, i32* %96, align 4, !tbaa !5 + %150 = add i32 %137, 16384 + %151 = add i32 %150, %134 + %152 = add i32 %151, %140 + %153 = ashr i32 %152, 15 + store i32 %153, i32* %100, align 4, !tbaa !5 + %154 = add i32 %136, 16384 + %155 = add i32 %154, %135 + %156 = add i32 %155, %141 + %157 = ashr i32 %156, 15 + store i32 %157, i32* %88, align 4, !tbaa !5 + %158 = getelementptr inbounds i32, i32* %82, i64 1 + %159 = add nsw i32 %81, -1 + %160 = icmp eq i32 %81, 0 + br i1 %160, label %161, label %80, !llvm.loop !12 + +161: ; preds = %80 + ret void +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local void @main() local_unnamed_addr #0 { + br label %1 + +1: ; preds = %0, %1 + %2 = phi i64 [ 0, %0 ], [ %8, %1 ] + %3 = phi i32 [ 1, %0 ], [ %6, %1 ] + %4 = mul nsw i32 %3, 133 + %5 = add nsw i32 %4, 81 + %6 = srem i32 %5, 65535 + %7 = getelementptr inbounds [64 x i32], [64 x i32]* @data, i64 0, i64 %2 + store i32 %6, i32* %7, align 4, !tbaa !5 + %8 = add nuw nsw i64 %2, 1 + %9 = icmp eq i64 %8, 64 + br i1 %9, label %10, label %1, !llvm.loop !13 + +10: ; preds = %1 + call void @jpeg_fdct_islow() + ret void +} + +attributes #0 = { nofree norecurse nosync nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = !{!6, !6, i64 0} +!6 = !{!"int", !7, i64 0} +!7 = !{!"omnipotent char", !8, i64 0} +!8 = !{!"Simple C/C++ TBAA"} +!9 = distinct !{!9, !10, !11} +!10 = !{!"llvm.loop.mustprogress"} +!11 = !{!"llvm.loop.unroll.disable"} +!12 = distinct !{!12, !10, !11} +!13 = distinct !{!13, !10, !11} diff --git a/test/lcdnum.ll b/test/lcdnum.ll new file mode 100644 index 0000000..06820bd --- /dev/null +++ b/test/lcdnum.ll @@ -0,0 +1,79 @@ +; ModuleID = 'lcdnum.c' +source_filename = "lcdnum.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +@IN = dso_local global i8 0, align 1 +@OUT = dso_local global i8 0, align 1 +@switch.table.main = private unnamed_addr constant [15 x i8] c"$]m.]{%\7Fo?zS|[\1B", align 1 + +; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone sspstrong uwtable willreturn +define dso_local zeroext i8 @num_to_lcd(i8 zeroext %0) local_unnamed_addr #0 { + %2 = add i8 %0, -1 + %3 = icmp ult i8 %2, 15 + br i1 %3, label %4, label %8 + +4: ; preds = %1 + %5 = sext i8 %2 to i64 + %6 = getelementptr inbounds [15 x i8], [15 x i8]* @switch.table.main, i64 0, i64 %5 + %7 = load i8, i8* %6, align 1 + br label %8 + +8: ; preds = %4, %1 + %9 = phi i8 [ 0, %1 ], [ %7, %4 ] + ret i8 %9 +} + +; Function Attrs: nofree norecurse nounwind sspstrong uwtable +define dso_local i32 @main() local_unnamed_addr #1 { + br label %1 + +1: ; preds = %0, %15 + %2 = phi i32 [ 0, %0 ], [ %16, %15 ] + %3 = load volatile i8, i8* @IN, align 1, !tbaa !5 + %4 = icmp ult i32 %2, 5 + br i1 %4, label %5, label %15 + +5: ; preds = %1 + %6 = and i8 %3, 15 + %7 = add nsw i8 %6, -1 + %8 = icmp ult i8 %7, 15 + br i1 %8, label %9, label %13 + +9: ; preds = %5 + %10 
= sext i8 %7 to i64 + %11 = getelementptr inbounds [15 x i8], [15 x i8]* @switch.table.main, i64 0, i64 %10 + %12 = load i8, i8* %11, align 1 + br label %13 + +13: ; preds = %9, %5 + %14 = phi i8 [ 0, %5 ], [ %12, %9 ] + store volatile i8 %14, i8* @OUT, align 1, !tbaa !5 + br label %15 + +15: ; preds = %1, %13 + %16 = add nuw nsw i32 %2, 1 + %17 = icmp eq i32 %16, 10 + br i1 %17, label %18, label %1, !llvm.loop !8 + +18: ; preds = %15 + ret i32 0 +} + +attributes #0 = { mustprogress nofree norecurse nosync nounwind readnone sspstrong uwtable willreturn "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #1 = { nofree norecurse nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = !{!6, !6, i64 0} +!6 = !{!"omnipotent char", !7, i64 0} +!7 = !{!"Simple C/C++ TBAA"} +!8 = distinct !{!8, !9, !10} +!9 = !{!"llvm.loop.mustprogress"} +!10 = !{!"llvm.loop.unroll.disable"} diff --git a/test/lms.ll b/test/lms.ll new file mode 100644 index 0000000..c48184d --- /dev/null +++ b/test/lms.ll @@ -0,0 +1,486 @@ +; ModuleID = 'lms.c' +source_filename = "lms.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +@mu = dso_local local_unnamed_addr global float 0x3F847AE140000000, align 4 +@lms_rand.next = internal unnamed_addr global i64 1, align 8 +@main.d = internal unnamed_addr global [201 x float] zeroinitializer, align 16 +@main.b = internal unnamed_addr global [21 x float] zeroinitializer, align 16 +@lms.px = internal unnamed_addr global [51 x float] zeroinitializer, align 16 +@lms.sigma = internal unnamed_addr global float 2.000000e+00, align 4 +@gaussian.ready = internal unnamed_addr global i1 false, align 4 +@gaussian.gstore = internal unnamed_addr global float 0.000000e+00, align 4 + +; Function Attrs: mustprogress nofree norecurse nosync nounwind sspstrong uwtable willreturn +define dso_local i32 @lms_rand() local_unnamed_addr #0 { + %1 = load i64, i64* @lms_rand.next, align 8, !tbaa !5 + %2 = mul i64 %1, 1103515245 + %3 = add i64 %2, 12345 + store i64 %3, i64* @lms_rand.next, align 8, !tbaa !5 + %4 = lshr i64 %3, 16 + %5 = trunc i64 %4 to i32 + %6 = and i32 %5, 32767 + ret i32 %6 +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local i32 @main() local_unnamed_addr #1 { + br label %1 + +1: ; preds = %23, %0 + %2 = phi i32 [ 0, %0 ], [ %25, %23 ] + %3 = phi i32 [ 1, %0 ], [ %26, %23 ] + %4 = phi float [ 0x3FC99999A0000000, %0 ], [ %24, %23 ] + %5 = icmp eq i32 %2, 0 + br i1 %5, label %6, label %23 + +6: ; preds = %1 + %7 = fmul float %4, %4 + %8 = fsub float 2.000000e+00, %7 + %9 = fpext float %8 to double + %10 = fpext float %4 to double + %11 = fmul double %10, 2.000000e+00 + %12 = fdiv double %9, %11 + %13 = fptrunc double %12 to float + %14 = fadd float %4, %13 + %15 = fmul float %14, %14 + %16 = fsub float 2.000000e+00, %15 + %17 = fcmp ult float %16, 0.000000e+00 + %18 = fneg float %16 
+ %19 = select i1 %17, float %18, float %16 + %20 = fpext float %19 to double + %21 = fcmp ugt double %20, 1.000000e-05 + br i1 %21, label %23, label %22 + +22: ; preds = %6 + br label %23 + +23: ; preds = %22, %6, %1 + %24 = phi float [ %4, %1 ], [ %14, %22 ], [ %14, %6 ] + %25 = phi i32 [ 1, %1 ], [ 1, %22 ], [ 0, %6 ] + %26 = add nuw nsw i32 %3, 1 + %27 = icmp eq i32 %26, 20 + br i1 %27, label %28, label %1, !llvm.loop !9 + +28: ; preds = %23, %50 + %29 = phi i32 [ %52, %50 ], [ 0, %23 ] + %30 = phi i32 [ %53, %50 ], [ 1, %23 ] + %31 = phi float [ %51, %50 ], [ 0x3FF3333340000000, %23 ] + %32 = icmp eq i32 %29, 0 + br i1 %32, label %33, label %50 + +33: ; preds = %28 + %34 = fmul float %31, %31 + %35 = fsub float 1.200000e+01, %34 + %36 = fpext float %35 to double + %37 = fpext float %31 to double + %38 = fmul double %37, 2.000000e+00 + %39 = fdiv double %36, %38 + %40 = fptrunc double %39 to float + %41 = fadd float %31, %40 + %42 = fmul float %41, %41 + %43 = fsub float 1.200000e+01, %42 + %44 = fcmp ult float %43, 0.000000e+00 + %45 = fneg float %43 + %46 = select i1 %44, float %45, float %43 + %47 = fpext float %46 to double + %48 = fcmp ugt double %47, 1.000000e-05 + br i1 %48, label %50, label %49 + +49: ; preds = %33 + br label %50 + +50: ; preds = %49, %33, %28 + %51 = phi float [ %31, %28 ], [ %41, %49 ], [ %41, %33 ] + %52 = phi i32 [ 1, %28 ], [ 1, %49 ], [ 0, %33 ] + %53 = add nuw nsw i32 %30, 1 + %54 = icmp eq i32 %53, 20 + br i1 %54, label %55, label %28, !llvm.loop !9 + +55: ; preds = %50 + %56 = fpext float %51 to double + %57 = fmul double %56, 2.000000e-01 + %58 = fptrunc double %57 to float + br label %59 + +59: ; preds = %55, %203 + %60 = phi i64 [ 0, %55 ], [ %208, %203 ] + %61 = trunc i64 %60 to i32 + %62 = sitofp i32 %61 to float + %63 = fmul float %62, 0x3FD41B2F80000000 + %64 = fpext float %63 to double + %65 = fcmp ogt double %64, 0x401921FB54442D18 + br i1 %65, label %70, label %66 + +66: ; preds = %70, %59 + %67 = phi float [ %63, %59 ], [ %73, %70 ] + %68 = fpext float %67 to double + %69 = fcmp olt double %68, 0xC01921FB54442D18 + br i1 %69, label %76, label %82 + +70: ; preds = %59, %70 + %71 = phi double [ %74, %70 ], [ %64, %59 ] + %72 = fadd double %71, 0xC01921FB54442D18 + %73 = fptrunc double %72 to float + %74 = fpext float %73 to double + %75 = fcmp ogt double %74, 0x401921FB54442D18 + br i1 %75, label %70, label %66, !llvm.loop !12 + +76: ; preds = %66, %76 + %77 = phi double [ %80, %76 ], [ %68, %66 ] + %78 = fadd double %77, 0x401921FB54442D18 + %79 = fptrunc double %78 to float + %80 = fpext float %79 to double + %81 = fcmp olt double %80, 0xC01921FB54442D18 + br i1 %81, label %76, label %82, !llvm.loop !13 + +82: ; preds = %76, %66 + %83 = phi float [ %67, %66 ], [ %79, %76 ] + %84 = fneg float %83 + %85 = fmul float %83, %84 + %86 = fmul float %83, %85 + %87 = fdiv float %86, 6.000000e+00 + %88 = fadd float %83, %87 + %89 = fcmp ult float %87, 0.000000e+00 + %90 = fneg float %87 + %91 = select i1 %89, float %90, float %87 + %92 = fpext float %91 to double + %93 = fcmp ult double %92, 1.000000e-05 + br i1 %93, label %113, label %94 + +94: ; preds = %82, %94 + %95 = phi i32 [ %107, %94 ], [ 2, %82 ] + %96 = phi float [ %105, %94 ], [ %87, %82 ] + %97 = phi float [ %106, %94 ], [ %88, %82 ] + %98 = fmul float %85, %96 + %99 = fpext float %98 to double + %100 = sitofp i32 %95 to double + %101 = fmul double %100, 2.000000e+00 + %102 = fadd double %101, 1.000000e+00 + %103 = fmul double %101, %102 + %104 = fdiv double %99, %103 + %105 = fptrunc 
double %104 to float + %106 = fadd float %97, %105 + %107 = add nuw nsw i32 %95, 1 + %108 = fcmp ult float %105, 0.000000e+00 + %109 = fneg float %105 + %110 = select i1 %108, float %109, float %105 + %111 = fpext float %110 to double + %112 = fcmp ult double %111, 1.000000e-05 + br i1 %112, label %113, label %94, !llvm.loop !14 + +113: ; preds = %94, %82 + %114 = phi float [ %88, %82 ], [ %106, %94 ] + %115 = fmul float %24, %114 + %116 = load i1, i1* @gaussian.ready, align 4 + br i1 %116, label %201, label %117 + +117: ; preds = %113 + %118 = load i64, i64* @lms_rand.next, align 8, !tbaa !5 + %119 = mul i64 %118, 1103515245 + %120 = add i64 %119, 12345 + %121 = lshr i64 %120, 16 + %122 = trunc i64 %121 to i32 + %123 = and i32 %122, 32767 + %124 = sitofp i32 %123 to float + %125 = fadd float %124, -1.638400e+04 + %126 = mul i64 %120, 1103515245 + %127 = add i64 %126, 12345 + store i64 %127, i64* @lms_rand.next, align 8, !tbaa !5 + %128 = lshr i64 %127, 16 + %129 = trunc i64 %128 to i32 + %130 = and i32 %129, 32767 + %131 = sitofp i32 %130 to float + %132 = fadd float %131, -1.638400e+04 + %133 = fmul float %125, 0x3F10000000000000 + %134 = fmul float %132, 0x3F10000000000000 + %135 = fmul float %133, %133 + %136 = fmul float %134, %134 + %137 = fadd float %135, %136 + %138 = fcmp ogt float %137, 1.000000e+00 + br i1 %138, label %139, label %162 + +139: ; preds = %117, %139 + %140 = phi i64 [ %149, %139 ], [ %127, %117 ] + %141 = mul i64 %140, 1103515245 + %142 = add i64 %141, 12345 + %143 = lshr i64 %142, 16 + %144 = trunc i64 %143 to i32 + %145 = and i32 %144, 32767 + %146 = sitofp i32 %145 to float + %147 = fadd float %146, -1.638400e+04 + %148 = mul i64 %142, 1103515245 + %149 = add i64 %148, 12345 + %150 = lshr i64 %149, 16 + %151 = trunc i64 %150 to i32 + %152 = and i32 %151, 32767 + %153 = sitofp i32 %152 to float + %154 = fadd float %153, -1.638400e+04 + %155 = fmul float %147, 0x3F10000000000000 + %156 = fmul float %154, 0x3F10000000000000 + %157 = fmul float %155, %155 + %158 = fmul float %156, %156 + %159 = fadd float %157, %158 + %160 = fcmp ogt float %159, 1.000000e+00 + br i1 %160, label %139, label %161, !llvm.loop !15 + +161: ; preds = %139 + store i64 %149, i64* @lms_rand.next, align 8, !tbaa !5 + br label %162 + +162: ; preds = %161, %117 + %163 = phi float [ %156, %161 ], [ %134, %117 ] + %164 = phi float [ %159, %161 ], [ %137, %117 ] + %165 = phi float [ %155, %161 ], [ %133, %117 ] + %166 = fdiv float -9.000000e+00, %164 + %167 = fcmp oeq float %166, 0.000000e+00 + br i1 %167, label %197, label %168 + +168: ; preds = %162 + %169 = fdiv float %166, 1.000000e+01 + br label %170 + +170: ; preds = %192, %168 + %171 = phi i32 [ 0, %168 ], [ %194, %192 ] + %172 = phi i32 [ 1, %168 ], [ %195, %192 ] + %173 = phi float [ %169, %168 ], [ %193, %192 ] + %174 = icmp eq i32 %171, 0 + br i1 %174, label %175, label %192 + +175: ; preds = %170 + %176 = fmul float %173, %173 + %177 = fsub float %166, %176 + %178 = fpext float %177 to double + %179 = fpext float %173 to double + %180 = fmul double %179, 2.000000e+00 + %181 = fdiv double %178, %180 + %182 = fptrunc double %181 to float + %183 = fadd float %173, %182 + %184 = fmul float %183, %183 + %185 = fsub float %166, %184 + %186 = fcmp ult float %185, 0.000000e+00 + %187 = fneg float %185 + %188 = select i1 %186, float %187, float %185 + %189 = fpext float %188 to double + %190 = fcmp ugt double %189, 1.000000e-05 + br i1 %190, label %192, label %191 + +191: ; preds = %175 + br label %192 + +192: ; preds = %191, %175, %170 + %193 
= phi float [ %173, %170 ], [ %183, %191 ], [ %183, %175 ] + %194 = phi i32 [ 1, %170 ], [ 1, %191 ], [ 0, %175 ] + %195 = add nuw nsw i32 %172, 1 + %196 = icmp eq i32 %195, 20 + br i1 %196, label %197, label %170, !llvm.loop !9 + +197: ; preds = %192, %162 + %198 = phi float [ 0.000000e+00, %162 ], [ %193, %192 ] + %199 = fmul float %165, %198 + store float %199, float* @gaussian.gstore, align 4, !tbaa !16 + %200 = fmul float %163, %198 + store i1 true, i1* @gaussian.ready, align 4 + br label %203 + +201: ; preds = %113 + store i1 false, i1* @gaussian.ready, align 4 + %202 = load float, float* @gaussian.gstore, align 4, !tbaa !16 + br label %203 + +203: ; preds = %197, %201 + %204 = phi float [ %200, %197 ], [ %202, %201 ] + %205 = fmul float %204, %58 + %206 = fadd float %115, %205 + %207 = getelementptr inbounds [201 x float], [201 x float]* @main.d, i64 0, i64 %60 + store float %206, float* %207, align 4, !tbaa !16 + %208 = add nuw nsw i64 %60, 1 + %209 = icmp eq i64 %208, 201 + br i1 %209, label %210, label %59, !llvm.loop !18 + +210: ; preds = %203 + %211 = load float, float* @mu, align 4, !tbaa !16 + %212 = fpext float %211 to double + %213 = fmul double %212, 2.000000e+00 + %214 = fdiv double %213, 2.100000e+01 + %215 = fptrunc double %214 to float + store float %215, float* @mu, align 4, !tbaa !16 + %216 = load float, float* @lms.sigma, align 4, !tbaa !16 + br label %217 + +217: ; preds = %210, %254 + %218 = phi i64 [ 0, %210 ], [ %255, %254 ] + %219 = phi float [ 0.000000e+00, %210 ], [ %222, %254 ] + %220 = phi float [ %216, %210 ], [ %241, %254 ] + %221 = getelementptr inbounds [201 x float], [201 x float]* @main.d, i64 0, i64 %218 + %222 = load float, float* %221, align 4, !tbaa !16 + store float %219, float* getelementptr inbounds ([51 x float], [51 x float]* @lms.px, i64 0, i64 0), align 16, !tbaa !16 + %223 = load float, float* getelementptr inbounds ([21 x float], [21 x float]* @main.b, i64 0, i64 0), align 16, !tbaa !16 + %224 = fmul float %219, %223 + br label %225 + +225: ; preds = %225, %217 + %226 = phi i64 [ 1, %217 ], [ %234, %225 ] + %227 = phi float [ %224, %217 ], [ %233, %225 ] + %228 = getelementptr inbounds [21 x float], [21 x float]* @main.b, i64 0, i64 %226 + %229 = load float, float* %228, align 4, !tbaa !16 + %230 = getelementptr inbounds [51 x float], [51 x float]* @lms.px, i64 0, i64 %226 + %231 = load float, float* %230, align 4, !tbaa !16 + %232 = fmul float %229, %231 + %233 = fadd float %227, %232 + %234 = add nuw nsw i64 %226, 1 + %235 = icmp eq i64 %234, 21 + br i1 %235, label %236, label %225, !llvm.loop !19 + +236: ; preds = %225 + %237 = fsub float %222, %233 + %238 = fmul float %219, %219 + %239 = fmul float %238, 0x3F847AE140000000 + %240 = fmul float %220, 0x3FEFAE1480000000 + %241 = fadd float %239, %240 + %242 = fmul float %237, %215 + %243 = fdiv float %242, %241 + br label %244 + +244: ; preds = %244, %236 + %245 = phi i64 [ 0, %236 ], [ %252, %244 ] + %246 = getelementptr inbounds [21 x float], [21 x float]* @main.b, i64 0, i64 %245 + %247 = load float, float* %246, align 4, !tbaa !16 + %248 = getelementptr inbounds [51 x float], [51 x float]* @lms.px, i64 0, i64 %245 + %249 = load float, float* %248, align 4, !tbaa !16 + %250 = fmul float %243, %249 + %251 = fadd float %247, %250 + store float %251, float* %246, align 4, !tbaa !16 + %252 = add nuw nsw i64 %245, 1 + %253 = icmp eq i64 %252, 21 + br i1 %253, label %254, label %244, !llvm.loop !20 + +254: ; preds = %244 + call void @llvm.memmove.p0i8.p0i8.i64(i8* noundef nonnull align 4 
dereferenceable(80) bitcast (float* getelementptr inbounds ([51 x float], [51 x float]* @lms.px, i64 0, i64 1) to i8*), i8* noundef nonnull align 16 dereferenceable(80) bitcast ([51 x float]* @lms.px to i8*), i64 80, i1 false) + %255 = add nuw nsw i64 %218, 1 + %256 = icmp eq i64 %255, 201 + br i1 %256, label %257, label %217, !llvm.loop !21 + +257: ; preds = %254 + store float %241, float* @lms.sigma, align 4, !tbaa !16 + ret i32 0 +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local float @lms(float %0, float %1, float* nocapture %2, i32 %3, float %4, float %5) local_unnamed_addr #1 { + store float %0, float* getelementptr inbounds ([51 x float], [51 x float]* @lms.px, i64 0, i64 0), align 16, !tbaa !16 + %7 = load float, float* %2, align 4, !tbaa !16 + %8 = fmul float %7, %0 + %9 = icmp slt i32 %3, 1 + br i1 %9, label %24, label %10 + +10: ; preds = %6 + %11 = add i32 %3, 1 + %12 = zext i32 %11 to i64 + br label %13 + +13: ; preds = %10, %13 + %14 = phi i64 [ 1, %10 ], [ %22, %13 ] + %15 = phi float [ %8, %10 ], [ %21, %13 ] + %16 = getelementptr inbounds float, float* %2, i64 %14 + %17 = load float, float* %16, align 4, !tbaa !16 + %18 = getelementptr inbounds [51 x float], [51 x float]* @lms.px, i64 0, i64 %14 + %19 = load float, float* %18, align 4, !tbaa !16 + %20 = fmul float %17, %19 + %21 = fadd float %15, %20 + %22 = add nuw nsw i64 %14, 1 + %23 = icmp eq i64 %22, %12 + br i1 %23, label %24, label %13, !llvm.loop !19 + +24: ; preds = %13, %6 + %25 = phi float [ %8, %6 ], [ %21, %13 ] + %26 = fsub float %1, %25 + %27 = fmul float %0, %0 + %28 = fmul float %27, %5 + %29 = fsub float 1.000000e+00, %5 + %30 = load float, float* @lms.sigma, align 4, !tbaa !16 + %31 = fmul float %29, %30 + %32 = fadd float %28, %31 + store float %32, float* @lms.sigma, align 4, !tbaa !16 + %33 = fmul float %26, %4 + %34 = fdiv float %33, %32 + %35 = icmp slt i32 %3, 0 + br i1 %35, label %39, label %36 + +36: ; preds = %24 + %37 = add i32 %3, 1 + %38 = zext i32 %37 to i64 + br label %43 + +39: ; preds = %43, %24 + %40 = icmp sgt i32 %3, 0 + br i1 %40, label %41, label %63 + +41: ; preds = %39 + %42 = zext i32 %3 to i64 + br label %53 + +43: ; preds = %36, %43 + %44 = phi i64 [ 0, %36 ], [ %51, %43 ] + %45 = getelementptr inbounds float, float* %2, i64 %44 + %46 = load float, float* %45, align 4, !tbaa !16 + %47 = getelementptr inbounds [51 x float], [51 x float]* @lms.px, i64 0, i64 %44 + %48 = load float, float* %47, align 4, !tbaa !16 + %49 = fmul float %34, %48 + %50 = fadd float %46, %49 + store float %50, float* %45, align 4, !tbaa !16 + %51 = add nuw nsw i64 %44, 1 + %52 = icmp eq i64 %51, %38 + br i1 %52, label %39, label %43, !llvm.loop !20 + +53: ; preds = %41, %53 + %54 = phi i64 [ %42, %41 ], [ %62, %53 ] + %55 = phi i32 [ %3, %41 ], [ %56, %53 ] + %56 = add nsw i32 %55, -1 + %57 = zext i32 %56 to i64 + %58 = getelementptr inbounds [51 x float], [51 x float]* @lms.px, i64 0, i64 %57 + %59 = load float, float* %58, align 4, !tbaa !16 + %60 = getelementptr inbounds [51 x float], [51 x float]* @lms.px, i64 0, i64 %54 + store float %59, float* %60, align 4, !tbaa !16 + %61 = icmp sgt i64 %54, 1 + %62 = add nsw i64 %54, -1 + br i1 %61, label %53, label %63, !llvm.loop !22 + +63: ; preds = %53, %39 + ret float %25 +} + +; Function Attrs: argmemonly nofree nounwind willreturn +declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1 immarg) #2 + +attributes #0 = { mustprogress nofree norecurse nosync nounwind sspstrong 
uwtable willreturn "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #1 = { nofree norecurse nosync nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #2 = { argmemonly nofree nounwind willreturn } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = !{!6, !6, i64 0} +!6 = !{!"long", !7, i64 0} +!7 = !{!"omnipotent char", !8, i64 0} +!8 = !{!"Simple C/C++ TBAA"} +!9 = distinct !{!9, !10, !11} +!10 = !{!"llvm.loop.mustprogress"} +!11 = !{!"llvm.loop.unroll.disable"} +!12 = distinct !{!12, !10, !11} +!13 = distinct !{!13, !10, !11} +!14 = distinct !{!14, !10, !11} +!15 = distinct !{!15, !10, !11} +!16 = !{!17, !17, i64 0} +!17 = !{!"float", !7, i64 0} +!18 = distinct !{!18, !10, !11} +!19 = distinct !{!19, !10, !11} +!20 = distinct !{!20, !10, !11} +!21 = distinct !{!21, !10, !11} +!22 = distinct !{!22, !10, !11} diff --git a/test/ludcmp.ll b/test/ludcmp.ll new file mode 100644 index 0000000..6d3ca4d --- /dev/null +++ b/test/ludcmp.ll @@ -0,0 +1,269 @@ +; ModuleID = 'ludcmp.c' +source_filename = "ludcmp.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +@a = dso_local local_unnamed_addr global [50 x [50 x double]] zeroinitializer, align 16 +@b = dso_local local_unnamed_addr global [50 x double] zeroinitializer, align 16 +@x = dso_local local_unnamed_addr global [50 x double] zeroinitializer, align 16 + +; Function Attrs: nofree nosync nounwind sspstrong uwtable +define dso_local i32 @main() local_unnamed_addr #0 { + br label %1 + +1: ; preds = %0, %17 + %2 = phi i64 [ 0, %0 ], [ %3, %17 ] + %3 = add nuw nsw i64 %2, 1 + br label %4 + +4: ; preds = %1, %4 + %5 = phi i64 [ 0, %1 ], [ %7, %4 ] + %6 = phi double [ 0.000000e+00, %1 ], [ %15, %4 ] + %7 = add nuw nsw i64 %5, 1 + %8 = add nuw nsw i64 %3, %7 + %9 = trunc i64 %8 to i32 + %10 = sitofp i32 %9 to double + %11 = getelementptr inbounds [50 x [50 x double]], [50 x [50 x double]]* @a, i64 0, i64 %2, i64 %5 + %12 = icmp eq i64 %2, %5 + %13 = fmul double %10, 1.000000e+01 + %14 = select i1 %12, double %13, double %10 + store double %14, double* %11, align 8, !tbaa !5 + %15 = fadd double %6, %14 + %16 = icmp eq i64 %7, 6 + br i1 %16, label %17, label %4, !llvm.loop !9 + +17: ; preds = %4 + %18 = getelementptr inbounds [50 x double], [50 x double]* @b, i64 0, i64 %2 + store double %15, double* %18, align 8, !tbaa !5 + %19 = icmp eq i64 %3, 6 + br i1 %19, label %20, label %1, !llvm.loop !12 + +20: ; preds = %17 + %21 = call i32 @ludcmp(i32 5, double 0x3EB0C6F7A0B5ED8D) + ret i32 0 +} + +; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn +declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) #1 + +; Function Attrs: nofree nosync nounwind sspstrong uwtable +define dso_local i32 @ludcmp(i32 %0, double %1) local_unnamed_addr #0 { + %3 = alloca [100 x double], align 16 + %4 = bitcast [100 x double]* %3 to i8* + call void @llvm.lifetime.start.p0i8(i64 800, i8* nonnull %4) #2 + %5 
= icmp sgt i32 %0, 99 + %6 = fcmp ole double %1, 0.000000e+00 + %7 = select i1 %5, i1 true, i1 %6 + br i1 %7, label %133, label %8 + +8: ; preds = %2 + %9 = icmp sgt i32 %0, 0 + br i1 %9, label %10, label %69 + +10: ; preds = %8 + %11 = add i32 %0, 1 + %12 = zext i32 %0 to i64 + %13 = zext i32 %11 to i64 + %14 = zext i32 %11 to i64 + br label %18 + +15: ; preds = %66 + %16 = add nuw nsw i64 %20, 1 + %17 = icmp eq i64 %28, %12 + br i1 %17, label %69, label %18, !llvm.loop !13 + +18: ; preds = %10, %15 + %19 = phi i64 [ 0, %10 ], [ %28, %15 ] + %20 = phi i64 [ 1, %10 ], [ %16, %15 ] + %21 = getelementptr inbounds [50 x [50 x double]], [50 x [50 x double]]* @a, i64 0, i64 %19, i64 %19 + %22 = load double, double* %21, align 8, !tbaa !5 + %23 = fcmp ult double %22, 0.000000e+00 + %24 = fneg double %22 + %25 = select i1 %23, double %24, double %22 + %26 = fcmp ugt double %25, %1 + br i1 %26, label %27, label %133 + +27: ; preds = %18 + %28 = add nuw nsw i64 %19, 1 + %29 = icmp eq i64 %19, 0 + br label %30 + +30: ; preds = %27, %45 + %31 = phi i64 [ %20, %27 ], [ %49, %45 ] + %32 = getelementptr inbounds [50 x [50 x double]], [50 x [50 x double]]* @a, i64 0, i64 %31, i64 %19 + %33 = load double, double* %32, align 8, !tbaa !5 + br i1 %29, label %45, label %34 + +34: ; preds = %30, %34 + %35 = phi i64 [ %43, %34 ], [ 0, %30 ] + %36 = phi double [ %42, %34 ], [ %33, %30 ] + %37 = getelementptr inbounds [50 x [50 x double]], [50 x [50 x double]]* @a, i64 0, i64 %31, i64 %35 + %38 = load double, double* %37, align 8, !tbaa !5 + %39 = getelementptr inbounds [50 x [50 x double]], [50 x [50 x double]]* @a, i64 0, i64 %35, i64 %19 + %40 = load double, double* %39, align 8, !tbaa !5 + %41 = fmul double %38, %40 + %42 = fsub double %36, %41 + %43 = add nuw nsw i64 %35, 1 + %44 = icmp eq i64 %43, %19 + br i1 %44, label %45, label %34, !llvm.loop !14 + +45: ; preds = %34, %30 + %46 = phi double [ %33, %30 ], [ %42, %34 ] + %47 = load double, double* %21, align 8, !tbaa !5 + %48 = fdiv double %46, %47 + store double %48, double* %32, align 8, !tbaa !5 + %49 = add nuw nsw i64 %31, 1 + %50 = icmp eq i64 %49, %13 + br i1 %50, label %51, label %30, !llvm.loop !15 + +51: ; preds = %45, %66 + %52 = phi i64 [ %67, %66 ], [ %20, %45 ] + %53 = getelementptr inbounds [50 x [50 x double]], [50 x [50 x double]]* @a, i64 0, i64 %28, i64 %52 + %54 = load double, double* %53, align 8, !tbaa !5 + br label %55 + +55: ; preds = %51, %55 + %56 = phi i64 [ 0, %51 ], [ %64, %55 ] + %57 = phi double [ %54, %51 ], [ %63, %55 ] + %58 = getelementptr inbounds [50 x [50 x double]], [50 x [50 x double]]* @a, i64 0, i64 %28, i64 %56 + %59 = load double, double* %58, align 8, !tbaa !5 + %60 = getelementptr inbounds [50 x [50 x double]], [50 x [50 x double]]* @a, i64 0, i64 %56, i64 %52 + %61 = load double, double* %60, align 8, !tbaa !5 + %62 = fmul double %59, %61 + %63 = fsub double %57, %62 + %64 = add nuw nsw i64 %56, 1 + %65 = icmp eq i64 %64, %20 + br i1 %65, label %66, label %55, !llvm.loop !16 + +66: ; preds = %55 + store double %63, double* %53, align 8, !tbaa !5 + %67 = add nuw nsw i64 %52, 1 + %68 = icmp eq i64 %67, %14 + br i1 %68, label %15, label %51, !llvm.loop !17 + +69: ; preds = %15, %8 + %70 = load double, double* getelementptr inbounds ([50 x double], [50 x double]* @b, i64 0, i64 0), align 16, !tbaa !5 + %71 = getelementptr inbounds [100 x double], [100 x double]* %3, i64 0, i64 0 + store double %70, double* %71, align 16, !tbaa !5 + %72 = icmp slt i32 %0, 1 + br i1 %72, label %95, label %73 + +73: ; preds = %69 + 
%74 = add i32 %0, 1 + %75 = zext i32 %74 to i64 + br label %76 + +76: ; preds = %73, %91 + %77 = phi i64 [ 1, %73 ], [ %93, %91 ] + %78 = getelementptr inbounds [50 x double], [50 x double]* @b, i64 0, i64 %77 + %79 = load double, double* %78, align 8, !tbaa !5 + br label %80 + +80: ; preds = %76, %80 + %81 = phi i64 [ 0, %76 ], [ %89, %80 ] + %82 = phi double [ %79, %76 ], [ %88, %80 ] + %83 = getelementptr inbounds [50 x [50 x double]], [50 x [50 x double]]* @a, i64 0, i64 %77, i64 %81 + %84 = load double, double* %83, align 8, !tbaa !5 + %85 = getelementptr inbounds [100 x double], [100 x double]* %3, i64 0, i64 %81 + %86 = load double, double* %85, align 8, !tbaa !5 + %87 = fmul double %84, %86 + %88 = fsub double %82, %87 + %89 = add nuw nsw i64 %81, 1 + %90 = icmp eq i64 %89, %77 + br i1 %90, label %91, label %80, !llvm.loop !18 + +91: ; preds = %80 + %92 = getelementptr inbounds [100 x double], [100 x double]* %3, i64 0, i64 %77 + store double %88, double* %92, align 8, !tbaa !5 + %93 = add nuw nsw i64 %77, 1 + %94 = icmp eq i64 %93, %75 + br i1 %94, label %95, label %76, !llvm.loop !19 + +95: ; preds = %91, %69 + %96 = sext i32 %0 to i64 + %97 = getelementptr inbounds [100 x double], [100 x double]* %3, i64 0, i64 %96 + %98 = load double, double* %97, align 8, !tbaa !5 + %99 = getelementptr inbounds [50 x [50 x double]], [50 x [50 x double]]* @a, i64 0, i64 %96, i64 %96 + %100 = load double, double* %99, align 8, !tbaa !5 + %101 = fdiv double %98, %100 + %102 = getelementptr inbounds [50 x double], [50 x double]* @x, i64 0, i64 %96 + store double %101, double* %102, align 8, !tbaa !5 + %103 = icmp sgt i32 %0, 0 + br i1 %103, label %104, label %133 + +104: ; preds = %95 + %105 = sext i32 %0 to i64 + %106 = add i32 %0, 1 + %107 = sext i32 %0 to i64 + br label %108 + +108: ; preds = %104, %126 + %109 = phi i64 [ %105, %104 ], [ %110, %126 ] + %110 = add nsw i64 %109, -1 + %111 = getelementptr inbounds [100 x double], [100 x double]* %3, i64 0, i64 %110 + %112 = load double, double* %111, align 8, !tbaa !5 + %113 = icmp sgt i64 %109, %107 + br i1 %113, label %126, label %114 + +114: ; preds = %108, %114 + %115 = phi i64 [ %123, %114 ], [ %109, %108 ] + %116 = phi double [ %122, %114 ], [ %112, %108 ] + %117 = getelementptr inbounds [50 x [50 x double]], [50 x [50 x double]]* @a, i64 0, i64 %110, i64 %115 + %118 = load double, double* %117, align 8, !tbaa !5 + %119 = getelementptr inbounds [50 x double], [50 x double]* @x, i64 0, i64 %115 + %120 = load double, double* %119, align 8, !tbaa !5 + %121 = fmul double %118, %120 + %122 = fsub double %116, %121 + %123 = add nsw i64 %115, 1 + %124 = trunc i64 %123 to i32 + %125 = icmp eq i32 %106, %124 + br i1 %125, label %126, label %114, !llvm.loop !20 + +126: ; preds = %114, %108 + %127 = phi double [ %112, %108 ], [ %122, %114 ] + %128 = getelementptr inbounds [50 x [50 x double]], [50 x [50 x double]]* @a, i64 0, i64 %110, i64 %110 + %129 = load double, double* %128, align 8, !tbaa !5 + %130 = fdiv double %127, %129 + %131 = getelementptr inbounds [50 x double], [50 x double]* @x, i64 0, i64 %110 + store double %130, double* %131, align 8, !tbaa !5 + %132 = icmp sgt i64 %109, 1 + br i1 %132, label %108, label %133, !llvm.loop !21 + +133: ; preds = %18, %126, %95, %2 + %134 = phi i32 [ 999, %2 ], [ 0, %95 ], [ 0, %126 ], [ 1, %18 ] + call void @llvm.lifetime.end.p0i8(i64 800, i8* nonnull %4) #2 + ret i32 %134 +} + +; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn +declare void @llvm.lifetime.end.p0i8(i64 immarg, 
i8* nocapture) #1 + +attributes #0 = { nofree nosync nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #1 = { argmemonly mustprogress nofree nosync nounwind willreturn } +attributes #2 = { nounwind } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = !{!6, !6, i64 0} +!6 = !{!"double", !7, i64 0} +!7 = !{!"omnipotent char", !8, i64 0} +!8 = !{!"Simple C/C++ TBAA"} +!9 = distinct !{!9, !10, !11} +!10 = !{!"llvm.loop.mustprogress"} +!11 = !{!"llvm.loop.unroll.disable"} +!12 = distinct !{!12, !10, !11} +!13 = distinct !{!13, !10, !11} +!14 = distinct !{!14, !10, !11} +!15 = distinct !{!15, !10, !11} +!16 = distinct !{!16, !10, !11} +!17 = distinct !{!17, !10, !11} +!18 = distinct !{!18, !10, !11} +!19 = distinct !{!19, !10, !11} +!20 = distinct !{!20, !10, !11} +!21 = distinct !{!21, !10, !11} diff --git a/test/matmult.ll b/test/matmult.ll new file mode 100644 index 0000000..ab69972 --- /dev/null +++ b/test/matmult.ll @@ -0,0 +1,302 @@ +; ModuleID = 'matmult.c' +source_filename = "matmult.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +@ArrayA = dso_local local_unnamed_addr global [20 x [20 x i32]] zeroinitializer, align 16 +@ArrayB = dso_local local_unnamed_addr global [20 x [20 x i32]] zeroinitializer, align 16 +@ResultArray = dso_local local_unnamed_addr global [20 x [20 x i32]] zeroinitializer, align 16 +@Seed = dso_local local_unnamed_addr global i32 0, align 4 + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local void @main() local_unnamed_addr #0 { + store i32 0, i32* @Seed, align 4, !tbaa !5 + br label %1 + +1: ; preds = %13, %0 + %2 = phi i32 [ 0, %0 ], [ %9, %13 ] + %3 = phi i64 [ 0, %0 ], [ %14, %13 ] + br label %4 + +4: ; preds = %4, %1 + %5 = phi i32 [ %2, %1 ], [ %9, %4 ] + %6 = phi i64 [ 0, %1 ], [ %11, %4 ] + %7 = mul nsw i32 %5, 133 + %8 = add nsw i32 %7, 81 + %9 = srem i32 %8, 8095 + %10 = getelementptr inbounds [20 x [20 x i32]], [20 x [20 x i32]]* @ArrayA, i64 0, i64 %3, i64 %6 + store i32 %9, i32* %10, align 4, !tbaa !5 + %11 = add nuw nsw i64 %6, 1 + %12 = icmp eq i64 %11, 20 + br i1 %12, label %13, label %4, !llvm.loop !9 + +13: ; preds = %4 + %14 = add nuw nsw i64 %3, 1 + %15 = icmp eq i64 %14, 20 + br i1 %15, label %16, label %1, !llvm.loop !12 + +16: ; preds = %13 + store i32 %9, i32* @Seed, align 4, !tbaa !5 + br label %17 + +17: ; preds = %16, %29 + %18 = phi i32 [ %25, %29 ], [ %9, %16 ] + %19 = phi i64 [ %30, %29 ], [ 0, %16 ] + br label %20 + +20: ; preds = %20, %17 + %21 = phi i32 [ %18, %17 ], [ %25, %20 ] + %22 = phi i64 [ 0, %17 ], [ %27, %20 ] + %23 = mul nsw i32 %21, 133 + %24 = add nsw i32 %23, 81 + %25 = srem i32 %24, 8095 + %26 = getelementptr inbounds [20 x [20 x i32]], [20 x [20 x i32]]* @ArrayB, i64 0, i64 %19, i64 %22 + store i32 %25, i32* %26, align 4, !tbaa !5 + %27 = add nuw nsw i64 %22, 1 + %28 = icmp eq i64 %27, 20 + br i1 %28, label %29, label %20, !llvm.loop !9 + +29: ; preds = %20 + %30 = add nuw nsw i64 %19, 1 + %31 = icmp eq i64 %30, 20 + br i1 %31, label %32, label %17, !llvm.loop !12 + +32: ; preds = %29 + store i32 %25, i32* 
@Seed, align 4, !tbaa !5 + br label %33 + +33: ; preds = %32, %52 + %34 = phi i64 [ %53, %52 ], [ 0, %32 ] + br label %35 + +35: ; preds = %49, %33 + %36 = phi i64 [ 0, %33 ], [ %50, %49 ] + %37 = getelementptr inbounds [20 x [20 x i32]], [20 x [20 x i32]]* @ResultArray, i64 0, i64 %34, i64 %36 + store i32 0, i32* %37, align 4, !tbaa !5 + br label %38 + +38: ; preds = %38, %35 + %39 = phi i32 [ 0, %35 ], [ %46, %38 ] + %40 = phi i64 [ 0, %35 ], [ %47, %38 ] + %41 = getelementptr inbounds [20 x [20 x i32]], [20 x [20 x i32]]* @ArrayA, i64 0, i64 %34, i64 %40 + %42 = load i32, i32* %41, align 4, !tbaa !5 + %43 = getelementptr inbounds [20 x [20 x i32]], [20 x [20 x i32]]* @ArrayB, i64 0, i64 %40, i64 %36 + %44 = load i32, i32* %43, align 4, !tbaa !5 + %45 = mul nsw i32 %44, %42 + %46 = add nsw i32 %39, %45 + %47 = add nuw nsw i64 %40, 1 + %48 = icmp eq i64 %47, 20 + br i1 %48, label %49, label %38, !llvm.loop !13 + +49: ; preds = %38 + store i32 %46, i32* %37, align 4, !tbaa !5 + %50 = add nuw nsw i64 %36, 1 + %51 = icmp eq i64 %50, 20 + br i1 %51, label %52, label %35, !llvm.loop !14 + +52: ; preds = %49 + %53 = add nuw nsw i64 %34, 1 + %54 = icmp eq i64 %53, 20 + br i1 %54, label %55, label %33, !llvm.loop !15 + +55: ; preds = %52 + ret void +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind sspstrong uwtable willreturn writeonly +define dso_local void @InitSeed() local_unnamed_addr #1 { + store i32 0, i32* @Seed, align 4, !tbaa !5 + ret void +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local void @Test([20 x i32]* nocapture %0, [20 x i32]* nocapture %1, [20 x i32]* nocapture %2) local_unnamed_addr #0 { + br label %4 + +4: ; preds = %15, %3 + %5 = phi i64 [ 0, %3 ], [ %16, %15 ] + br label %6 + +6: ; preds = %6, %4 + %7 = phi i64 [ 0, %4 ], [ %13, %6 ] + %8 = load i32, i32* @Seed, align 4, !tbaa !5 + %9 = mul nsw i32 %8, 133 + %10 = add nsw i32 %9, 81 + %11 = srem i32 %10, 8095 + store i32 %11, i32* @Seed, align 4, !tbaa !5 + %12 = getelementptr inbounds [20 x i32], [20 x i32]* %0, i64 %5, i64 %7 + store i32 %11, i32* %12, align 4, !tbaa !5 + %13 = add nuw nsw i64 %7, 1 + %14 = icmp eq i64 %13, 20 + br i1 %14, label %15, label %6, !llvm.loop !9 + +15: ; preds = %6 + %16 = add nuw nsw i64 %5, 1 + %17 = icmp eq i64 %16, 20 + br i1 %17, label %18, label %4, !llvm.loop !12 + +18: ; preds = %15, %29 + %19 = phi i64 [ %30, %29 ], [ 0, %15 ] + br label %20 + +20: ; preds = %20, %18 + %21 = phi i64 [ 0, %18 ], [ %27, %20 ] + %22 = load i32, i32* @Seed, align 4, !tbaa !5 + %23 = mul nsw i32 %22, 133 + %24 = add nsw i32 %23, 81 + %25 = srem i32 %24, 8095 + store i32 %25, i32* @Seed, align 4, !tbaa !5 + %26 = getelementptr inbounds [20 x i32], [20 x i32]* %1, i64 %19, i64 %21 + store i32 %25, i32* %26, align 4, !tbaa !5 + %27 = add nuw nsw i64 %21, 1 + %28 = icmp eq i64 %27, 20 + br i1 %28, label %29, label %20, !llvm.loop !9 + +29: ; preds = %20 + %30 = add nuw nsw i64 %19, 1 + %31 = icmp eq i64 %30, 20 + br i1 %31, label %32, label %18, !llvm.loop !12 + +32: ; preds = %29, %51 + %33 = phi i64 [ %52, %51 ], [ 0, %29 ] + br label %34 + +34: ; preds = %48, %32 + %35 = phi i64 [ 0, %32 ], [ %49, %48 ] + %36 = getelementptr inbounds [20 x i32], [20 x i32]* %2, i64 %33, i64 %35 + store i32 0, i32* %36, align 4, !tbaa !5 + br label %37 + +37: ; preds = %37, %34 + %38 = phi i64 [ 0, %34 ], [ %46, %37 ] + %39 = getelementptr inbounds [20 x i32], [20 x i32]* %0, i64 %33, i64 %38 + %40 = load i32, i32* %39, align 4, !tbaa !5 + %41 = getelementptr 
inbounds [20 x i32], [20 x i32]* %1, i64 %38, i64 %35 + %42 = load i32, i32* %41, align 4, !tbaa !5 + %43 = mul nsw i32 %42, %40 + %44 = load i32, i32* %36, align 4, !tbaa !5 + %45 = add nsw i32 %44, %43 + store i32 %45, i32* %36, align 4, !tbaa !5 + %46 = add nuw nsw i64 %38, 1 + %47 = icmp eq i64 %46, 20 + br i1 %47, label %48, label %37, !llvm.loop !13 + +48: ; preds = %37 + %49 = add nuw nsw i64 %35, 1 + %50 = icmp eq i64 %49, 20 + br i1 %50, label %51, label %34, !llvm.loop !14 + +51: ; preds = %48 + %52 = add nuw nsw i64 %33, 1 + %53 = icmp eq i64 %52, 20 + br i1 %53, label %54, label %32, !llvm.loop !15 + +54: ; preds = %51 + ret void +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local void @Initialize([20 x i32]* nocapture %0) local_unnamed_addr #0 { + br label %2 + +2: ; preds = %1, %13 + %3 = phi i64 [ 0, %1 ], [ %14, %13 ] + br label %4 + +4: ; preds = %2, %4 + %5 = phi i64 [ 0, %2 ], [ %11, %4 ] + %6 = load i32, i32* @Seed, align 4, !tbaa !5 + %7 = mul nsw i32 %6, 133 + %8 = add nsw i32 %7, 81 + %9 = srem i32 %8, 8095 + store i32 %9, i32* @Seed, align 4, !tbaa !5 + %10 = getelementptr inbounds [20 x i32], [20 x i32]* %0, i64 %3, i64 %5 + store i32 %9, i32* %10, align 4, !tbaa !5 + %11 = add nuw nsw i64 %5, 1 + %12 = icmp eq i64 %11, 20 + br i1 %12, label %13, label %4, !llvm.loop !9 + +13: ; preds = %4 + %14 = add nuw nsw i64 %3, 1 + %15 = icmp eq i64 %14, 20 + br i1 %15, label %16, label %2, !llvm.loop !12 + +16: ; preds = %13 + ret void +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local void @Multiply([20 x i32]* nocapture readonly %0, [20 x i32]* nocapture readonly %1, [20 x i32]* nocapture %2) local_unnamed_addr #0 { + br label %4 + +4: ; preds = %3, %23 + %5 = phi i64 [ 0, %3 ], [ %24, %23 ] + br label %6 + +6: ; preds = %4, %20 + %7 = phi i64 [ 0, %4 ], [ %21, %20 ] + %8 = getelementptr inbounds [20 x i32], [20 x i32]* %2, i64 %5, i64 %7 + store i32 0, i32* %8, align 4, !tbaa !5 + br label %9 + +9: ; preds = %6, %9 + %10 = phi i64 [ 0, %6 ], [ %18, %9 ] + %11 = getelementptr inbounds [20 x i32], [20 x i32]* %0, i64 %5, i64 %10 + %12 = load i32, i32* %11, align 4, !tbaa !5 + %13 = getelementptr inbounds [20 x i32], [20 x i32]* %1, i64 %10, i64 %7 + %14 = load i32, i32* %13, align 4, !tbaa !5 + %15 = mul nsw i32 %14, %12 + %16 = load i32, i32* %8, align 4, !tbaa !5 + %17 = add nsw i32 %16, %15 + store i32 %17, i32* %8, align 4, !tbaa !5 + %18 = add nuw nsw i64 %10, 1 + %19 = icmp eq i64 %18, 20 + br i1 %19, label %20, label %9, !llvm.loop !13 + +20: ; preds = %9 + %21 = add nuw nsw i64 %7, 1 + %22 = icmp eq i64 %21, 20 + br i1 %22, label %23, label %6, !llvm.loop !14 + +23: ; preds = %20 + %24 = add nuw nsw i64 %5, 1 + %25 = icmp eq i64 %24, 20 + br i1 %25, label %26, label %4, !llvm.loop !15 + +26: ; preds = %23 + ret void +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind sspstrong uwtable willreturn +define dso_local i32 @RandomInteger() local_unnamed_addr #2 { + %1 = load i32, i32* @Seed, align 4, !tbaa !5 + %2 = mul nsw i32 %1, 133 + %3 = add nsw i32 %2, 81 + %4 = srem i32 %3, 8095 + store i32 %4, i32* @Seed, align 4, !tbaa !5 + ret i32 %4 +} + +attributes #0 = { nofree norecurse nosync nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #1 = { mustprogress nofree norecurse 
nosync nounwind sspstrong uwtable willreturn writeonly "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #2 = { mustprogress nofree norecurse nosync nounwind sspstrong uwtable willreturn "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = !{!6, !6, i64 0} +!6 = !{!"int", !7, i64 0} +!7 = !{!"omnipotent char", !8, i64 0} +!8 = !{!"Simple C/C++ TBAA"} +!9 = distinct !{!9, !10, !11} +!10 = !{!"llvm.loop.mustprogress"} +!11 = !{!"llvm.loop.unroll.disable"} +!12 = distinct !{!12, !10, !11} +!13 = distinct !{!13, !10, !11} +!14 = distinct !{!14, !10, !11} +!15 = distinct !{!15, !10, !11} diff --git a/test/minver.ll b/test/minver.ll new file mode 100644 index 0000000..2eda1bb --- /dev/null +++ b/test/minver.ll @@ -0,0 +1,403 @@ +; ModuleID = 'minver.c' +source_filename = "minver.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +@a = internal unnamed_addr global [3 x [3 x double]] [[3 x double] [double 3.000000e+00, double -6.000000e+00, double 7.000000e+00], [3 x double] [double 9.000000e+00, double 0.000000e+00, double -5.000000e+00], [3 x double] [double 5.000000e+00, double -8.000000e+00, double 6.000000e+00]], align 16 +@aa = dso_local local_unnamed_addr global [3 x [3 x double]] zeroinitializer, align 16 +@a_i = dso_local local_unnamed_addr global [3 x [3 x double]] zeroinitializer, align 16 +@b = dso_local local_unnamed_addr global [3 x [3 x double]] zeroinitializer, align 16 +@c = dso_local local_unnamed_addr global [3 x [3 x double]] zeroinitializer, align 16 +@det = dso_local local_unnamed_addr global double 0.000000e+00, align 8 +@e = dso_local local_unnamed_addr global [3 x [3 x double]] zeroinitializer, align 16 + +; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone sspstrong uwtable willreturn +define dso_local double @minver_fabs(double %0) local_unnamed_addr #0 { + %2 = fcmp ult double %0, 0.000000e+00 + %3 = fneg double %0 + %4 = select i1 %2, double %3, double %0 + ret double %4 +} + +; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn +declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) #1 + +; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn +declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture) #1 + +; Function Attrs: nofree nosync nounwind sspstrong uwtable +define dso_local i32 @main() local_unnamed_addr #2 { + call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(72) bitcast ([3 x [3 x double]]* @aa to i8*), i8* noundef nonnull align 16 dereferenceable(72) bitcast ([3 x [3 x double]]* @a to i8*), i64 72, i1 false) + %1 = call i32 @minver(i32 3, i32 3, double 0x3EB0C6F7A0B5ED8D) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(72) bitcast ([3 x [3 x double]]* @a_i to i8*), i8* noundef nonnull align 16 dereferenceable(72) bitcast ([3 x [3 x double]]* @a to i8*), i64 72, 
i1 false) + br label %2 + +2: ; preds = %0, %21 + %3 = phi i64 [ %22, %21 ], [ 0, %0 ] + br label %4 + +4: ; preds = %17, %2 + %5 = phi i64 [ 0, %2 ], [ %19, %17 ] + br label %6 + +6: ; preds = %6, %4 + %7 = phi i64 [ 0, %4 ], [ %15, %6 ] + %8 = phi double [ 0.000000e+00, %4 ], [ %14, %6 ] + %9 = getelementptr inbounds [3 x [3 x double]], [3 x [3 x double]]* @a, i64 0, i64 %3, i64 %7 + %10 = load double, double* %9, align 8, !tbaa !5 + %11 = getelementptr inbounds [3 x [3 x double]], [3 x [3 x double]]* @b, i64 0, i64 %7, i64 %5 + %12 = load double, double* %11, align 8, !tbaa !5 + %13 = fmul double %10, %12 + %14 = fadd double %8, %13 + %15 = add nuw nsw i64 %7, 1 + %16 = icmp eq i64 %15, 3 + br i1 %16, label %17, label %6, !llvm.loop !9 + +17: ; preds = %6 + %18 = getelementptr inbounds [3 x [3 x double]], [3 x [3 x double]]* @c, i64 0, i64 %3, i64 %5 + store double %14, double* %18, align 8, !tbaa !5 + %19 = add nuw nsw i64 %5, 1 + %20 = icmp eq i64 %19, 3 + br i1 %20, label %21, label %4, !llvm.loop !12 + +21: ; preds = %17 + %22 = add nuw nsw i64 %3, 1 + %23 = icmp eq i64 %22, 3 + br i1 %23, label %24, label %2, !llvm.loop !13 + +24: ; preds = %21 + ret i32 0 +} + +; Function Attrs: nofree nosync nounwind sspstrong uwtable +define dso_local i32 @minver(i32 %0, i32 %1, double %2) local_unnamed_addr #2 { + %4 = alloca [500 x i32], align 16 + %5 = bitcast [500 x i32]* %4 to i8* + call void @llvm.lifetime.start.p0i8(i64 2000, i8* nonnull %5) #5 + %6 = add i32 %0, -2 + %7 = icmp ugt i32 %6, 498 + %8 = fcmp ole double %2, 0.000000e+00 + %9 = select i1 %7, i1 true, i1 %8 + br i1 %9, label %155, label %10 + +10: ; preds = %3 + %11 = icmp sgt i32 %0, 0 + br i1 %11, label %12, label %14 + +12: ; preds = %10 + %13 = zext i32 %0 to i64 + br label %27 + +14: ; preds = %27, %10 + %15 = icmp sgt i32 %0, 0 + %16 = icmp sgt i32 %0, 0 + %17 = icmp sgt i32 %0, 0 + %18 = icmp sgt i32 %0, 0 + %19 = icmp sgt i32 %0, 0 + br i1 %19, label %20, label %37 + +20: ; preds = %14 + %21 = zext i32 %0 to i64 + %22 = zext i32 %0 to i64 + %23 = zext i32 %0 to i64 + %24 = zext i32 %0 to i64 + %25 = zext i32 %0 to i64 + %26 = zext i32 %0 to i64 + br label %33 + +27: ; preds = %12, %27 + %28 = phi i64 [ 0, %12 ], [ %31, %27 ] + %29 = getelementptr inbounds [500 x i32], [500 x i32]* %4, i64 0, i64 %28 + %30 = trunc i64 %28 to i32 + store i32 %30, i32* %29, align 4, !tbaa !14 + %31 = add nuw nsw i64 %28, 1 + %32 = icmp eq i64 %31, %13 + br i1 %32, label %14, label %27, !llvm.loop !16 + +33: ; preds = %20, %121 + %34 = phi i64 [ 0, %20 ], [ %124, %121 ] + %35 = phi double [ 1.000000e+00, %20 ], [ %85, %121 ] + %36 = phi i32 [ undef, %20 ], [ %54, %121 ] + br label %43 + +37: ; preds = %121, %14 + %38 = phi double [ 1.000000e+00, %14 ], [ %85, %121 ] + %39 = icmp sgt i32 %0, 0 + %40 = icmp sgt i32 %0, 0 + br i1 %40, label %41, label %152 + +41: ; preds = %37 + %42 = zext i32 %0 to i64 + br label %126 + +43: ; preds = %33, %43 + %44 = phi i64 [ %34, %33 ], [ %56, %43 ] + %45 = phi double [ 0.000000e+00, %33 ], [ %55, %43 ] + %46 = phi i32 [ %36, %33 ], [ %54, %43 ] + %47 = getelementptr inbounds [3 x [3 x double]], [3 x [3 x double]]* @a, i64 0, i64 %44, i64 %34 + %48 = load double, double* %47, align 8, !tbaa !5 + %49 = fcmp ult double %48, 0.000000e+00 + %50 = fneg double %48 + %51 = select i1 %49, double %50, double %48 + %52 = fcmp ogt double %51, %45 + %53 = trunc i64 %44 to i32 + %54 = select i1 %52, i32 %53, i32 %46 + %55 = select i1 %52, double %51, double %45 + %56 = add nuw nsw i64 %44, 1 + %57 = icmp eq i64 %56, 
%22 + br i1 %57, label %58, label %43, !llvm.loop !17 + +58: ; preds = %43 + %59 = sext i32 %54 to i64 + %60 = getelementptr inbounds [3 x [3 x double]], [3 x [3 x double]]* @a, i64 0, i64 %59, i64 %34 + %61 = load double, double* %60, align 8, !tbaa !5 + %62 = fcmp ult double %61, 0.000000e+00 + %63 = fneg double %61 + %64 = select i1 %62, double %63, double %61 + %65 = fcmp ugt double %64, %2 + br i1 %65, label %66, label %152 + +66: ; preds = %58 + %67 = fmul double %35, %61 + %68 = zext i32 %54 to i64 + %69 = icmp eq i64 %34, %68 + br i1 %69, label %84, label %70 + +70: ; preds = %66 + %71 = fneg double %51 + %72 = getelementptr inbounds [500 x i32], [500 x i32]* %4, i64 0, i64 %34 + %73 = load i32, i32* %72, align 4, !tbaa !14 + %74 = getelementptr inbounds [500 x i32], [500 x i32]* %4, i64 0, i64 %59 + %75 = load i32, i32* %74, align 4, !tbaa !14 + store i32 %75, i32* %72, align 4, !tbaa !14 + store i32 %73, i32* %74, align 4, !tbaa !14 + br i1 %15, label %76, label %84 + +76: ; preds = %70, %76 + %77 = phi i64 [ %82, %76 ], [ 0, %70 ] + %78 = getelementptr inbounds [3 x [3 x double]], [3 x [3 x double]]* @a, i64 0, i64 %34, i64 %77 + %79 = load double, double* %78, align 8, !tbaa !5 + %80 = getelementptr inbounds [3 x [3 x double]], [3 x [3 x double]]* @a, i64 0, i64 %59, i64 %77 + %81 = load double, double* %80, align 8, !tbaa !5 + store double %81, double* %78, align 8, !tbaa !5 + store double %79, double* %80, align 8, !tbaa !5 + %82 = add nuw nsw i64 %77, 1 + %83 = icmp eq i64 %82, %23 + br i1 %83, label %84, label %76, !llvm.loop !18 + +84: ; preds = %76, %70, %66 + %85 = phi double [ %67, %66 ], [ %71, %70 ], [ %71, %76 ] + br i1 %16, label %87, label %86 + +86: ; preds = %87, %84 + br i1 %18, label %94, label %121 + +87: ; preds = %84, %87 + %88 = phi i64 [ %92, %87 ], [ 0, %84 ] + %89 = getelementptr inbounds [3 x [3 x double]], [3 x [3 x double]]* @a, i64 0, i64 %34, i64 %88 + %90 = load double, double* %89, align 8, !tbaa !5 + %91 = fdiv double %90, %61 + store double %91, double* %89, align 8, !tbaa !5 + %92 = add nuw nsw i64 %88, 1 + %93 = icmp eq i64 %92, %24 + br i1 %93, label %86, label %87, !llvm.loop !19 + +94: ; preds = %86, %118 + %95 = phi i64 [ %119, %118 ], [ 0, %86 ] + %96 = icmp eq i64 %95, %34 + br i1 %96, label %118, label %97 + +97: ; preds = %94 + %98 = getelementptr inbounds [3 x [3 x double]], [3 x [3 x double]]* @a, i64 0, i64 %95, i64 %34 + %99 = load double, double* %98, align 8, !tbaa !5 + %100 = fcmp une double %99, 0.000000e+00 + br i1 %100, label %101, label %118 + +101: ; preds = %97 + br i1 %17, label %102, label %115 + +102: ; preds = %101, %112 + %103 = phi i64 [ %113, %112 ], [ 0, %101 ] + %104 = icmp eq i64 %103, %34 + br i1 %104, label %112, label %105 + +105: ; preds = %102 + %106 = getelementptr inbounds [3 x [3 x double]], [3 x [3 x double]]* @a, i64 0, i64 %34, i64 %103 + %107 = load double, double* %106, align 8, !tbaa !5 + %108 = fmul double %99, %107 + %109 = getelementptr inbounds [3 x [3 x double]], [3 x [3 x double]]* @a, i64 0, i64 %95, i64 %103 + %110 = load double, double* %109, align 8, !tbaa !5 + %111 = fsub double %110, %108 + store double %111, double* %109, align 8, !tbaa !5 + br label %112 + +112: ; preds = %102, %105 + %113 = add nuw nsw i64 %103, 1 + %114 = icmp eq i64 %113, %26 + br i1 %114, label %115, label %102, !llvm.loop !20 + +115: ; preds = %112, %101 + %116 = fneg double %99 + %117 = fdiv double %116, %61 + store double %117, double* %98, align 8, !tbaa !5 + br label %118 + +118: ; preds = %94, %115, %97 + 
%119 = add nuw nsw i64 %95, 1 + %120 = icmp eq i64 %119, %25 + br i1 %120, label %121, label %94, !llvm.loop !21 + +121: ; preds = %118, %86 + %122 = fdiv double 1.000000e+00, %61 + %123 = getelementptr inbounds [3 x [3 x double]], [3 x [3 x double]]* @a, i64 0, i64 %34, i64 %34 + store double %122, double* %123, align 8, !tbaa !5 + %124 = add nuw nsw i64 %34, 1 + %125 = icmp eq i64 %124, %21 + br i1 %125, label %37, label %33, !llvm.loop !22 + +126: ; preds = %41, %149 + %127 = phi i64 [ 0, %41 ], [ %150, %149 ] + %128 = getelementptr inbounds [500 x i32], [500 x i32]* %4, i64 0, i64 %127 + %129 = load i32, i32* %128, align 4, !tbaa !14 + %130 = zext i32 %129 to i64 + %131 = icmp eq i64 %127, %130 + br i1 %131, label %149, label %136 + +132: ; preds = %143, %136 + %133 = load i32, i32* %128, align 4, !tbaa !14 + %134 = zext i32 %133 to i64 + %135 = icmp eq i64 %127, %134 + br i1 %135, label %149, label %136, !llvm.loop !23 + +136: ; preds = %126, %132 + %137 = phi i32 [ %133, %132 ], [ %129, %126 ] + %138 = sext i32 %137 to i64 + %139 = getelementptr inbounds [500 x i32], [500 x i32]* %4, i64 0, i64 %138 + %140 = load i32, i32* %139, align 4, !tbaa !14 + store i32 %137, i32* %139, align 4, !tbaa !14 + store i32 %140, i32* %128, align 4, !tbaa !14 + %141 = getelementptr inbounds [3 x [3 x double]], [3 x [3 x double]]* @a, i64 0, i64 %138, i64 %127 + %142 = getelementptr inbounds [3 x [3 x double]], [3 x [3 x double]]* @a, i64 0, i64 %138, i64 %138 + br i1 %39, label %143, label %132 + +143: ; preds = %136, %143 + %144 = phi i32 [ %147, %143 ], [ 0, %136 ] + %145 = load double, double* %141, align 8, !tbaa !5 + %146 = load double, double* %142, align 8, !tbaa !5 + store double %146, double* %141, align 8, !tbaa !5 + store double %145, double* %142, align 8, !tbaa !5 + %147 = add nuw nsw i32 %144, 1 + %148 = icmp eq i32 %147, %0 + br i1 %148, label %132, label %143, !llvm.loop !24 + +149: ; preds = %132, %126 + %150 = add nuw nsw i64 %127, 1 + %151 = icmp eq i64 %150, %42 + br i1 %151, label %152, label %126, !llvm.loop !25 + +152: ; preds = %58, %149, %37 + %153 = phi double [ %38, %37 ], [ %38, %149 ], [ %35, %58 ] + %154 = phi i32 [ 0, %37 ], [ 0, %149 ], [ 1, %58 ] + store double %153, double* @det, align 8, !tbaa !5 + br label %155 + +155: ; preds = %152, %3 + %156 = phi i32 [ 999, %3 ], [ %154, %152 ] + call void @llvm.lifetime.end.p0i8(i64 2000, i8* nonnull %5) #5 + ret i32 %156 +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local i32 @mmul(i32 %0, i32 %1, i32 %2, i32 %3) local_unnamed_addr #3 { + %5 = icmp sgt i32 %0, 0 + %6 = icmp sgt i32 %2, 0 + %7 = select i1 %5, i1 %6, i1 false + %8 = icmp sgt i32 %3, 0 + %9 = select i1 %7, i1 %8, i1 false + %10 = icmp eq i32 %1, %2 + %11 = select i1 %9, i1 %10, i1 false + br i1 %11, label %12, label %38 + +12: ; preds = %4 + %13 = zext i32 %0 to i64 + %14 = zext i32 %3 to i64 + %15 = zext i32 %2 to i64 + br label %16 + +16: ; preds = %12, %35 + %17 = phi i64 [ 0, %12 ], [ %36, %35 ] + br label %18 + +18: ; preds = %16, %31 + %19 = phi i64 [ 0, %16 ], [ %33, %31 ] + br label %20 + +20: ; preds = %18, %20 + %21 = phi i64 [ 0, %18 ], [ %29, %20 ] + %22 = phi double [ 0.000000e+00, %18 ], [ %28, %20 ] + %23 = getelementptr inbounds [3 x [3 x double]], [3 x [3 x double]]* @a, i64 0, i64 %17, i64 %21 + %24 = load double, double* %23, align 8, !tbaa !5 + %25 = getelementptr inbounds [3 x [3 x double]], [3 x [3 x double]]* @b, i64 0, i64 %21, i64 %19 + %26 = load double, double* %25, align 8, !tbaa !5 + %27 = fmul 
double %24, %26 + %28 = fadd double %22, %27 + %29 = add nuw nsw i64 %21, 1 + %30 = icmp eq i64 %29, %15 + br i1 %30, label %31, label %20, !llvm.loop !9 + +31: ; preds = %20 + %32 = getelementptr inbounds [3 x [3 x double]], [3 x [3 x double]]* @c, i64 0, i64 %17, i64 %19 + store double %28, double* %32, align 8, !tbaa !5 + %33 = add nuw nsw i64 %19, 1 + %34 = icmp eq i64 %33, %14 + br i1 %34, label %35, label %18, !llvm.loop !12 + +35: ; preds = %31 + %36 = add nuw nsw i64 %17, 1 + %37 = icmp eq i64 %36, %13 + br i1 %37, label %38, label %16, !llvm.loop !13 + +38: ; preds = %35, %4 + %39 = phi i32 [ 999, %4 ], [ 0, %35 ] + ret i32 %39 +} + +; Function Attrs: argmemonly nofree nounwind willreturn +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg) #4 + +attributes #0 = { mustprogress nofree norecurse nosync nounwind readnone sspstrong uwtable willreturn "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #1 = { argmemonly mustprogress nofree nosync nounwind willreturn } +attributes #2 = { nofree nosync nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #3 = { nofree norecurse nosync nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #4 = { argmemonly nofree nounwind willreturn } +attributes #5 = { nounwind } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = !{!6, !6, i64 0} +!6 = !{!"double", !7, i64 0} +!7 = !{!"omnipotent char", !8, i64 0} +!8 = !{!"Simple C/C++ TBAA"} +!9 = distinct !{!9, !10, !11} +!10 = !{!"llvm.loop.mustprogress"} +!11 = !{!"llvm.loop.unroll.disable"} +!12 = distinct !{!12, !10, !11} +!13 = distinct !{!13, !10, !11} +!14 = !{!15, !15, i64 0} +!15 = !{!"int", !7, i64 0} +!16 = distinct !{!16, !10, !11} +!17 = distinct !{!17, !10, !11} +!18 = distinct !{!18, !10, !11} +!19 = distinct !{!19, !10, !11} +!20 = distinct !{!20, !10, !11} +!21 = distinct !{!21, !10, !11} +!22 = distinct !{!22, !10, !11} +!23 = distinct !{!23, !11} +!24 = distinct !{!24, !10, !11} +!25 = distinct !{!25, !10, !11} diff --git a/test/ndes.ll b/test/ndes.ll new file mode 100644 index 0000000..cc3c867 --- /dev/null +++ b/test/ndes.ll @@ -0,0 +1,590 @@ +; ModuleID = 'ndes.c' +source_filename = "ndes.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +%struct.GREAT = type { i64, i64, i64 } +%struct.IMMENSE = type { i64, i64 } + +@des.ip = internal unnamed_addr constant [65 x i8] c"\00:2*\22\1A\12\0A\02<4,$\1C\14\0C\04>6.&\1E\16\0E\06@80( \18\10\0891)!\19\11\09\01;3+#\1B\13\0B\03=5-%\1D\15\0D\05?7/'\1F\17\0F\07", align 16 +@des.ipm = internal unnamed_addr constant [65 x i8] c"\00(\080\108\18@ 
'\07/\0F7\17?\1F&\06.\0E6\16>\1E%\05-\0D5\15=\1D$\04,\0C4\14<\1C#\03+\0B3\13;\1B\22\02*\0A2\12:\1A!\01)\091\119\19", align 16 +@des.kns = internal global [17 x %struct.GREAT] zeroinitializer, align 16 +@des.initflag = internal unnamed_addr global i1 false, align 4 +@bit = dso_local local_unnamed_addr global [33 x i64] zeroinitializer, align 16 +@icd = internal unnamed_addr global %struct.IMMENSE zeroinitializer, align 8 +@ipc1 = internal unnamed_addr constant [57 x i8] c"\0091)!\19\11\09\01:2*\22\1A\12\0A\02;3+#\1B\13\0B\03<4,$?7/'\1F\17\0F\07>6.&\1E\16\0E\06=5-%\1D\15\0D\05\1C\14\0C\04", align 16 +@ipc2 = internal unnamed_addr constant [49 x i8] c"\00\0E\11\0B\18\01\05\03\1C\0F\06\15\0A\17\13\0C\04\1A\08\10\07\1B\14\0D\02)4\1F%/7\1E(3-!0,1'8\225.*2$\1D ", align 16 +@cyfun.iet = internal unnamed_addr constant [49 x i32] [i32 0, i32 32, i32 1, i32 2, i32 3, i32 4, i32 5, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 28, i32 29, i32 30, i32 31, i32 32, i32 1], align 16 +@cyfun.ipp = internal unnamed_addr constant [33 x i32] [i32 0, i32 16, i32 7, i32 20, i32 21, i32 29, i32 12, i32 28, i32 17, i32 1, i32 15, i32 23, i32 26, i32 5, i32 18, i32 31, i32 10, i32 2, i32 8, i32 24, i32 14, i32 32, i32 27, i32 3, i32 9, i32 19, i32 13, i32 30, i32 6, i32 22, i32 11, i32 4, i32 25], align 16 +@cyfun.is = internal unnamed_addr constant [16 x [4 x [9 x i8]]] [[4 x [9 x i8]] [[9 x i8] c"\00\0E\0F\0A\07\02\0C\04\0D", [9 x i8] c"\00\00\03\0D\0D\0E\0A\0D\01", [9 x i8] c"\00\04\00\0D\0A\04\09\01\07", [9 x i8] c"\00\0F\0D\01\03\0B\04\06\02"], [4 x [9 x i8]] [[9 x i8] c"\00\04\01\00\0D\0C\01\0B\02", [9 x i8] c"\00\0F\0D\07\08\0B\0F\00\0F", [9 x i8] c"\00\01\0E\06\06\02\0E\04\0B", [9 x i8] c"\00\0C\08\0A\0F\08\03\0B\01"], [4 x [9 x i8]] [[9 x i8] c"\00\0D\08\09\0E\04\0A\02\08", [9 x i8] c"\00\07\04\00\0B\02\04\0B\0D", [9 x i8] c"\00\0E\07\04\09\01\0F\0B\04", [9 x i8] c"\00\08\0A\0D\00\0C\02\0D\0E"], [4 x [9 x i8]] [[9 x i8] c"\00\01\0E\0E\03\01\0F\0E\04", [9 x i8] c"\00\04\07\09\05\0C\02\07\08", [9 x i8] c"\00\08\0B\09\00\0B\05\0D\01", [9 x i8] c"\00\02\01\00\06\07\0C\08\07"], [4 x [9 x i8]] [[9 x i8] c"\00\02\06\06\00\07\09\0F\06", [9 x i8] c"\00\0E\0F\03\06\04\07\04\0A", [9 x i8] c"\00\0D\0A\08\0C\0A\02\0C\09", [9 x i8] c"\00\04\03\06\0A\01\09\01\04"], [4 x [9 x i8]] [[9 x i8] c"\00\0F\0B\03\06\0A\02\00\0F", [9 x i8] c"\00\02\02\04\0F\07\0C\09\03", [9 x i8] c"\00\06\04\0F\0B\0D\08\03\0C", [9 x i8] c"\00\09\0F\09\01\0E\05\04\0A"], [4 x [9 x i8]] [[9 x i8] c"\00\0B\03\0F\09\0B\06\08\0B", [9 x i8] c"\00\0D\08\06\00\0D\09\01\07", [9 x i8] c"\00\02\0D\03\07\07\0C\07\0E", [9 x i8] c"\00\01\04\08\0D\02\0F\0A\08"], [4 x [9 x i8]] [[9 x i8] c"\00\08\04\05\0A\06\08\0D\01", [9 x i8] c"\00\01\0E\0A\03\01\05\0A\04", [9 x i8] c"\00\0B\01\00\0D\08\03\0E\02", [9 x i8] c"\00\07\02\07\08\0D\0A\07\0D"], [4 x [9 x i8]] [[9 x i8] c"\00\03\09\01\01\08\00\03\0A", [9 x i8] c"\00\0A\0C\02\04\05\06\0E\0C", [9 x i8] c"\00\0F\05\0B\0F\0F\07\0A\00", [9 x i8] c"\00\05\0B\04\09\06\0B\09\0F"], [4 x [9 x i8]] [[9 x i8] c"\00\0A\07\0D\02\05\0D\0C\09", [9 x i8] c"\00\06\00\08\07\00\01\03\05", [9 x i8] c"\00\0C\08\01\01\09\00\0F\06", [9 x i8] c"\00\0B\06\0F\04\0F\0E\05\0C"], [4 x [9 x i8]] [[9 x i8] c"\00\06\02\0C\08\03\03\09\03", [9 x i8] c"\00\0C\01\05\02\0F\0D\05\06", [9 x i8] 
c"\00\09\0C\02\03\0C\04\06\0A", [9 x i8] c"\00\03\07\0E\05\00\01\00\09"], [4 x [9 x i8]] [[9 x i8] c"\00\0C\0D\07\05\0F\04\07\0E", [9 x i8] c"\00\0B\0A\0E\0C\0A\0E\0C\0B", [9 x i8] c"\00\07\06\0C\0E\05\0A\08\0D", [9 x i8] c"\00\0E\0C\03\0B\09\07\0F\00"], [4 x [9 x i8]] [[9 x i8] c"\00\05\0C\0B\0B\0D\0E\05\05", [9 x i8] c"\00\09\06\0C\01\03\00\02\00", [9 x i8] c"\00\03\09\05\05\06\01\00\0F", [9 x i8] c"\00\0A\00\0B\0C\0A\06\0E\03"], [4 x [9 x i8]] [[9 x i8] c"\00\09\00\04\0C\00\07\0A\00", [9 x i8] c"\00\05\09\0B\0A\09\0B\0F\0E", [9 x i8] c"\00\0A\03\0A\02\03\0D\05\03", [9 x i8] c"\00\00\05\05\07\04\00\02\05"], [4 x [9 x i8]] [[9 x i8] c"\00\00\05\02\04\0E\05\06\0C", [9 x i8] c"\00\03\0B\0F\0E\08\03\08\09", [9 x i8] c"\00\05\02\0E\08\00\0B\09\05", [9 x i8] c"\00\06\0E\02\02\05\08\03\06"], [4 x [9 x i8]] [[9 x i8] c"\00\07\0A\08\0F\09\0B\01\07", [9 x i8] c"\00\08\05\01\09\06\08\06\02", [9 x i8] c"\00\00\0F\07\04\0E\06\02\08", [9 x i8] c"\00\0D\09\0C\0E\03\0D\0C\0B"]], align 16 +@cyfun.ibin = internal unnamed_addr constant [16 x i8] c"\00\08\04\0C\02\0A\06\0E\01\09\05\0D\03\0B\07\0F", align 16 +@value = dso_local local_unnamed_addr global i32 1, align 4 + +; Function Attrs: nofree nosync nounwind sspstrong uwtable +define dso_local void @des(i64 %0, i64 %1, i64 %2, i64 %3, i32* nocapture %4, i32 %5, %struct.IMMENSE* nocapture %6) local_unnamed_addr #0 { + %8 = alloca i64, align 8 + %9 = alloca %struct.GREAT, align 8 + %10 = bitcast i64* %8 to i8* + call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %10) #6 + %11 = bitcast %struct.GREAT* %9 to i8* + call void @llvm.lifetime.start.p0i8(i64 24, i8* nonnull %11) #6 + %12 = load i1, i1* @des.initflag, align 4 + br i1 %12, label %21, label %13 + +13: ; preds = %7 + store i1 true, i1* @des.initflag, align 4 + store i64 1, i64* getelementptr inbounds ([33 x i64], [33 x i64]* @bit, i64 0, i64 1), align 8, !tbaa !5 + br label %14 + +14: ; preds = %13, %14 + %15 = phi i64 [ 2, %13 ], [ %19, %14 ] + %16 = phi i64 [ 1, %13 ], [ %17, %14 ] + %17 = shl i64 %16, 1 + %18 = getelementptr inbounds [33 x i64], [33 x i64]* @bit, i64 0, i64 %15 + store i64 %17, i64* %18, align 8, !tbaa !5 + %19 = add nuw nsw i64 %15, 1 + %20 = icmp eq i64 %19, 33 + br i1 %20, label %21, label %14, !llvm.loop !9 + +21: ; preds = %14, %7 + %22 = load i32, i32* %4, align 4, !tbaa !12 + %23 = icmp eq i32 %22, 0 + br i1 %23, label %24, label %25 + +24: ; preds = %67, %21 + br label %76 + +25: ; preds = %21 + store i32 0, i32* %4, align 4, !tbaa !12 + call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 8 dereferenceable(16) bitcast (%struct.IMMENSE* @icd to i8*), i8 0, i64 16, i1 false) + br label %27 + +26: ; preds = %27 + store i64 %47, i64* getelementptr inbounds (%struct.IMMENSE, %struct.IMMENSE* @icd, i64 0, i32 1), align 8, !tbaa !14 + store i64 %63, i64* getelementptr inbounds (%struct.IMMENSE, %struct.IMMENSE* @icd, i64 0, i32 0), align 8, !tbaa !16 + br label %67 + +27: ; preds = %25, %27 + %28 = phi i64 [ 28, %25 ], [ %64, %27 ] + %29 = phi i64 [ 56, %25 ], [ %65, %27 ] + %30 = phi i64 [ 0, %25 ], [ %47, %27 ] + %31 = phi i64 [ 0, %25 ], [ %63, %27 ] + %32 = shl i64 %30, 1 + %33 = getelementptr inbounds [57 x i8], [57 x i8]* @ipc1, i64 0, i64 %28 + %34 = load i8, i8* %33, align 1, !tbaa !17 + %35 = lshr i64 529835723988510, %28 + %36 = and i64 %35, 1 + %37 = icmp eq i64 %36, 0 + %38 = sext i8 %34 to i64 + %39 = add nsw i64 %38, -32 + %40 = select i1 %37, i64 %38, i64 %39 + %41 = select i1 %37, i64 %3, i64 %2 + %42 = getelementptr inbounds [33 x i64], [33 x i64]* @bit, 
i64 0, i64 %40 + %43 = load i64, i64* %42, align 8, !tbaa !5 + %44 = and i64 %43, %41 + %45 = icmp ne i64 %44, 0 + %46 = zext i1 %45 to i64 + %47 = or i64 %32, %46 + %48 = shl i64 %31, 1 + %49 = getelementptr inbounds [57 x i8], [57 x i8]* @ipc1, i64 0, i64 %29 + %50 = load i8, i8* %49, align 1, !tbaa !17 + %51 = lshr i64 529835723988510, %29 + %52 = and i64 %51, 1 + %53 = icmp eq i64 %52, 0 + %54 = sext i8 %50 to i64 + %55 = add nsw i64 %54, -32 + %56 = select i1 %53, i64 %54, i64 %55 + %57 = select i1 %53, i64 %3, i64 %2 + %58 = getelementptr inbounds [33 x i64], [33 x i64]* @bit, i64 0, i64 %56 + %59 = load i64, i64* %58, align 8, !tbaa !5 + %60 = and i64 %59, %57 + %61 = icmp ne i64 %60, 0 + %62 = zext i1 %61 to i64 + %63 = or i64 %48, %62 + %64 = add nsw i64 %28, -1 + %65 = add nsw i64 %29, -1 + %66 = icmp ugt i64 %28, 1 + br i1 %66, label %27, label %26, !llvm.loop !18 + +67: ; preds = %26, %67 + %68 = phi i64 [ 1, %26 ], [ %72, %67 ] + %69 = getelementptr inbounds [17 x %struct.GREAT], [17 x %struct.GREAT]* @des.kns, i64 0, i64 %68 + %70 = bitcast %struct.GREAT* %69 to i8* + call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 8 dereferenceable(24) %11, i8* noundef nonnull align 8 dereferenceable(24) %70, i64 24, i1 false), !tbaa.struct !19 + %71 = trunc i64 %68 to i32 + call void @ks(i32 %71, %struct.GREAT* nonnull %9) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 8 dereferenceable(24) %70, i8* noundef nonnull align 8 dereferenceable(24) %11, i64 24, i1 false), !tbaa.struct !19 + %72 = add nuw nsw i64 %68, 1 + %73 = icmp eq i64 %72, 17 + br i1 %73, label %24, label %67, !llvm.loop !20 + +74: ; preds = %76 + %75 = icmp eq i32 %5, 1 + br label %112 + +76: ; preds = %24, %76 + %77 = phi i64 [ %109, %76 ], [ 32, %24 ] + %78 = phi i64 [ %110, %76 ], [ 64, %24 ] + %79 = phi i64 [ %108, %76 ], [ 0, %24 ] + %80 = phi i64 [ %94, %76 ], [ 0, %24 ] + %81 = shl i64 %80, 1 + %82 = getelementptr inbounds [65 x i8], [65 x i8]* @des.ip, i64 0, i64 %77 + %83 = load i8, i8* %82, align 1, !tbaa !17 + %84 = icmp sgt i8 %83, 32 + %85 = sext i8 %83 to i64 + %86 = add nsw i64 %85, -32 + %87 = select i1 %84, i64 %86, i64 %85 + %88 = select i1 %84, i64 %0, i64 %1 + %89 = getelementptr inbounds [33 x i64], [33 x i64]* @bit, i64 0, i64 %87 + %90 = load i64, i64* %89, align 8, !tbaa !5 + %91 = and i64 %90, %88 + %92 = icmp ne i64 %91, 0 + %93 = zext i1 %92 to i64 + %94 = or i64 %81, %93 + %95 = shl i64 %79, 1 + %96 = getelementptr inbounds [65 x i8], [65 x i8]* @des.ip, i64 0, i64 %78 + %97 = load i8, i8* %96, align 1, !tbaa !17 + %98 = icmp sgt i8 %97, 32 + %99 = sext i8 %97 to i64 + %100 = add nsw i64 %99, -32 + %101 = select i1 %98, i64 %100, i64 %99 + %102 = select i1 %98, i64 %0, i64 %1 + %103 = getelementptr inbounds [33 x i64], [33 x i64]* @bit, i64 0, i64 %101 + %104 = load i64, i64* %103, align 8, !tbaa !5 + %105 = and i64 %104, %102 + %106 = icmp ne i64 %105, 0 + %107 = zext i1 %106 to i64 + %108 = or i64 %95, %107 + %109 = add nsw i64 %77, -1 + %110 = add nsw i64 %78, -1 + %111 = icmp ugt i64 %77, 1 + br i1 %111, label %76, label %74, !llvm.loop !21 + +112: ; preds = %74, %112 + %113 = phi i64 [ %108, %74 ], [ %121, %112 ] + %114 = phi i64 [ %94, %74 ], [ %113, %112 ] + %115 = phi i32 [ 1, %74 ], [ %122, %112 ] + %116 = sub nuw nsw i32 17, %115 + %117 = select i1 %75, i32 %116, i32 %115 + %118 = sext i32 %117 to i64 + %119 = getelementptr inbounds [17 x %struct.GREAT], [17 x %struct.GREAT]* @des.kns, i64 0, i64 %118 + call void @cyfun(i64 %113, %struct.GREAT* nonnull 
byval(%struct.GREAT) align 8 %119, i64* nonnull %8) + %120 = load i64, i64* %8, align 8, !tbaa !5 + %121 = xor i64 %120, %114 + store i64 %121, i64* %8, align 8, !tbaa !5 + %122 = add nuw nsw i32 %115, 1 + %123 = icmp eq i32 %122, 17 + br i1 %123, label %124, label %112, !llvm.loop !22 + +124: ; preds = %112 + store i64 %113, i64* %8, align 8, !tbaa !5 + %125 = getelementptr inbounds %struct.IMMENSE, %struct.IMMENSE* %6, i64 0, i32 0 + %126 = getelementptr inbounds %struct.IMMENSE, %struct.IMMENSE* %6, i64 0, i32 1 + %127 = bitcast %struct.IMMENSE* %6 to i8* + call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 8 dereferenceable(16) %127, i8 0, i64 16, i1 false) + br label %128 + +128: ; preds = %124, %128 + %129 = phi i64 [ 32, %124 ], [ %161, %128 ] + %130 = phi i64 [ 64, %124 ], [ %162, %128 ] + %131 = load i64, i64* %126, align 8, !tbaa !14 + %132 = shl i64 %131, 1 + store i64 %132, i64* %126, align 8, !tbaa !14 + %133 = getelementptr inbounds [65 x i8], [65 x i8]* @des.ipm, i64 0, i64 %129 + %134 = load i8, i8* %133, align 1, !tbaa !17 + %135 = icmp sgt i8 %134, 32 + %136 = sext i8 %134 to i64 + %137 = add nsw i64 %136, -32 + %138 = select i1 %135, i64 %137, i64 %136 + %139 = select i1 %135, i64 %113, i64 %121 + %140 = getelementptr inbounds [33 x i64], [33 x i64]* @bit, i64 0, i64 %138 + %141 = load i64, i64* %140, align 8, !tbaa !5 + %142 = and i64 %141, %139 + %143 = icmp ne i64 %142, 0 + %144 = zext i1 %143 to i64 + %145 = or i64 %132, %144 + store i64 %145, i64* %126, align 8, !tbaa !14 + %146 = load i64, i64* %125, align 8, !tbaa !16 + %147 = shl i64 %146, 1 + store i64 %147, i64* %125, align 8, !tbaa !16 + %148 = getelementptr inbounds [65 x i8], [65 x i8]* @des.ipm, i64 0, i64 %130 + %149 = load i8, i8* %148, align 1, !tbaa !17 + %150 = icmp sgt i8 %149, 32 + %151 = sext i8 %149 to i64 + %152 = add nsw i64 %151, -32 + %153 = select i1 %150, i64 %152, i64 %151 + %154 = select i1 %150, i64 %113, i64 %121 + %155 = getelementptr inbounds [33 x i64], [33 x i64]* @bit, i64 0, i64 %153 + %156 = load i64, i64* %155, align 8, !tbaa !5 + %157 = and i64 %156, %154 + %158 = icmp ne i64 %157, 0 + %159 = zext i1 %158 to i64 + %160 = or i64 %147, %159 + store i64 %160, i64* %125, align 8, !tbaa !16 + %161 = add nsw i64 %129, -1 + %162 = add nsw i64 %130, -1 + %163 = icmp ugt i64 %129, 1 + br i1 %163, label %128, label %164, !llvm.loop !23 + +164: ; preds = %128 + call void @llvm.lifetime.end.p0i8(i64 24, i8* nonnull %11) #6 + call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %10) #6 + ret void +} + +; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn +declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) #1 + +; Function Attrs: mustprogress nofree norecurse nosync nounwind readonly sspstrong uwtable willreturn +define dso_local i64 @getbit(i64 %0, i64 %1, i32 %2, i32 %3) local_unnamed_addr #2 { + %5 = icmp sgt i32 %2, %3 + %6 = select i1 %5, i32 %3, i32 0 + %7 = sub nsw i32 %2, %6 + %8 = select i1 %5, i64 %0, i64 %1 + %9 = sext i32 %7 to i64 + %10 = getelementptr inbounds [33 x i64], [33 x i64]* @bit, i64 0, i64 %9 + %11 = load i64, i64* %10, align 8, !tbaa !5 + %12 = and i64 %11, %8 + %13 = icmp ne i64 %12, 0 + %14 = zext i1 %13 to i64 + ret i64 %14 +} + +; Function Attrs: argmemonly mustprogress nofree nounwind willreturn +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg) #3 + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local void @ks(i32 
%0, %struct.GREAT* nocapture %1) local_unnamed_addr #4 { + switch i32 %0, label %3 [ + i32 16, label %6 + i32 9, label %6 + i32 2, label %6 + i32 1, label %6 + ] + +3: ; preds = %2 + %4 = load i64, i64* getelementptr inbounds (%struct.IMMENSE, %struct.IMMENSE* @icd, i64 0, i32 1), align 8, !tbaa !14 + %5 = load i64, i64* getelementptr inbounds (%struct.IMMENSE, %struct.IMMENSE* @icd, i64 0, i32 0), align 8, !tbaa !16 + br label %17 + +6: ; preds = %2, %2, %2, %2 + %7 = load i64, i64* getelementptr inbounds (%struct.IMMENSE, %struct.IMMENSE* @icd, i64 0, i32 1), align 8, !tbaa !14 + %8 = shl i64 %7, 28 + %9 = and i64 %8, 268435456 + %10 = or i64 %9, %7 + %11 = lshr i64 %10, 1 + store i64 %11, i64* getelementptr inbounds (%struct.IMMENSE, %struct.IMMENSE* @icd, i64 0, i32 1), align 8, !tbaa !14 + %12 = load i64, i64* getelementptr inbounds (%struct.IMMENSE, %struct.IMMENSE* @icd, i64 0, i32 0), align 8, !tbaa !16 + %13 = shl i64 %12, 28 + %14 = and i64 %13, 268435456 + %15 = or i64 %14, %12 + %16 = lshr i64 %15, 1 + br label %32 + +17: ; preds = %3, %17 + %18 = phi i32 [ 1, %3 ], [ %29, %17 ] + %19 = phi i64 [ %4, %3 ], [ %24, %17 ] + %20 = phi i64 [ %5, %3 ], [ %28, %17 ] + %21 = shl i64 %19, 28 + %22 = and i64 %21, 268435456 + %23 = or i64 %22, %19 + %24 = lshr i64 %23, 1 + %25 = shl i64 %20, 28 + %26 = and i64 %25, 268435456 + %27 = or i64 %26, %20 + %28 = lshr i64 %27, 1 + %29 = add nuw nsw i32 %18, 1 + %30 = icmp eq i32 %29, 3 + br i1 %30, label %31, label %17, !llvm.loop !24 + +31: ; preds = %17 + store i64 %24, i64* getelementptr inbounds (%struct.IMMENSE, %struct.IMMENSE* @icd, i64 0, i32 1), align 8, !tbaa !14 + br label %32 + +32: ; preds = %31, %6 + %33 = phi i64 [ %16, %6 ], [ %28, %31 ] + store i64 %33, i64* getelementptr inbounds (%struct.IMMENSE, %struct.IMMENSE* @icd, i64 0, i32 0), align 8, !tbaa !16 + %34 = getelementptr inbounds %struct.GREAT, %struct.GREAT* %1, i64 0, i32 0 + %35 = getelementptr inbounds %struct.GREAT, %struct.GREAT* %1, i64 0, i32 1 + %36 = getelementptr inbounds %struct.GREAT, %struct.GREAT* %1, i64 0, i32 2 + %37 = bitcast %struct.GREAT* %1 to i8* + call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 8 dereferenceable(24) %37, i8 0, i64 24, i1 false) + %38 = load i64, i64* getelementptr inbounds (%struct.IMMENSE, %struct.IMMENSE* @icd, i64 0, i32 1), align 8 + br label %39 + +39: ; preds = %32, %39 + %40 = phi i64 [ 16, %32 ], [ %82, %39 ] + %41 = phi i64 [ 32, %32 ], [ %83, %39 ] + %42 = phi i64 [ 48, %32 ], [ %84, %39 ] + %43 = load i64, i64* %36, align 8, !tbaa !25 + %44 = shl i64 %43, 1 + store i64 %44, i64* %36, align 8, !tbaa !25 + %45 = getelementptr inbounds [49 x i8], [49 x i8]* @ipc2, i64 0, i64 %40 + %46 = load i8, i8* %45, align 1, !tbaa !17 + %47 = sext i8 %46 to i64 + %48 = getelementptr inbounds [33 x i64], [33 x i64]* @bit, i64 0, i64 %47 + %49 = load i64, i64* %48, align 8, !tbaa !5 + %50 = and i64 %49, %38 + %51 = icmp ne i64 %50, 0 + %52 = zext i1 %51 to i64 + %53 = or i64 %44, %52 + store i64 %53, i64* %36, align 8, !tbaa !25 + %54 = load i64, i64* %35, align 8, !tbaa !27 + %55 = shl i64 %54, 1 + store i64 %55, i64* %35, align 8, !tbaa !27 + %56 = getelementptr inbounds [49 x i8], [49 x i8]* @ipc2, i64 0, i64 %41 + %57 = load i8, i8* %56, align 1, !tbaa !17 + %58 = add nsw i64 %41, -25 + %59 = icmp ult i64 %58, 24 + %60 = sext i8 %57 to i64 + %61 = add nsw i64 %60, -28 + %62 = select i1 %59, i64 %61, i64 %60 + %63 = select i1 %59, i64 %33, i64 %38 + %64 = getelementptr inbounds [33 x i64], [33 x i64]* @bit, i64 0, i64 %62 + 
%65 = load i64, i64* %64, align 8, !tbaa !5 + %66 = and i64 %65, %63 + %67 = icmp ne i64 %66, 0 + %68 = zext i1 %67 to i64 + %69 = or i64 %55, %68 + store i64 %69, i64* %35, align 8, !tbaa !27 + %70 = load i64, i64* %34, align 8, !tbaa !28 + %71 = shl i64 %70, 1 + store i64 %71, i64* %34, align 8, !tbaa !28 + %72 = getelementptr inbounds [49 x i8], [49 x i8]* @ipc2, i64 0, i64 %42 + %73 = load i8, i8* %72, align 1, !tbaa !17 + %74 = sext i8 %73 to i64 + %75 = add nsw i64 %74, -28 + %76 = getelementptr inbounds [33 x i64], [33 x i64]* @bit, i64 0, i64 %75 + %77 = load i64, i64* %76, align 8, !tbaa !5 + %78 = and i64 %77, %33 + %79 = icmp ne i64 %78, 0 + %80 = zext i1 %79 to i64 + %81 = or i64 %71, %80 + store i64 %81, i64* %34, align 8, !tbaa !28 + %82 = add nsw i64 %40, -1 + %83 = add nsw i64 %41, -1 + %84 = add nsw i64 %42, -1 + %85 = icmp ugt i64 %40, 1 + br i1 %85, label %39, label %86, !llvm.loop !29 + +86: ; preds = %39 + ret void +} + +; Function Attrs: nofree nosync nounwind sspstrong uwtable +define dso_local void @cyfun(i64 %0, %struct.GREAT* nocapture readonly byval(%struct.GREAT) align 8 %1, i64* nocapture %2) local_unnamed_addr #0 { + %4 = alloca [9 x i8], align 1 + %5 = getelementptr inbounds [9 x i8], [9 x i8]* %4, i64 0, i64 0 + call void @llvm.lifetime.start.p0i8(i64 9, i8* nonnull %5) #6 + br label %6 + +6: ; preds = %3, %6 + %7 = phi i64 [ 16, %3 ], [ %43, %6 ] + %8 = phi i64 [ 32, %3 ], [ %44, %6 ] + %9 = phi i64 [ 48, %3 ], [ %45, %6 ] + %10 = phi i64 [ 0, %3 ], [ %22, %6 ] + %11 = phi i64 [ 0, %3 ], [ %32, %6 ] + %12 = phi i64 [ 0, %3 ], [ %42, %6 ] + %13 = shl i64 %10, 1 + %14 = getelementptr inbounds [49 x i32], [49 x i32]* @cyfun.iet, i64 0, i64 %7 + %15 = load i32, i32* %14, align 4, !tbaa !12 + %16 = sext i32 %15 to i64 + %17 = getelementptr inbounds [33 x i64], [33 x i64]* @bit, i64 0, i64 %16 + %18 = load i64, i64* %17, align 8, !tbaa !5 + %19 = and i64 %18, %0 + %20 = icmp ne i64 %19, 0 + %21 = zext i1 %20 to i64 + %22 = or i64 %13, %21 + %23 = shl i64 %11, 1 + %24 = getelementptr inbounds [49 x i32], [49 x i32]* @cyfun.iet, i64 0, i64 %8 + %25 = load i32, i32* %24, align 4, !tbaa !12 + %26 = sext i32 %25 to i64 + %27 = getelementptr inbounds [33 x i64], [33 x i64]* @bit, i64 0, i64 %26 + %28 = load i64, i64* %27, align 8, !tbaa !5 + %29 = and i64 %28, %0 + %30 = icmp ne i64 %29, 0 + %31 = zext i1 %30 to i64 + %32 = or i64 %23, %31 + %33 = shl i64 %12, 1 + %34 = getelementptr inbounds [49 x i32], [49 x i32]* @cyfun.iet, i64 0, i64 %9 + %35 = load i32, i32* %34, align 4, !tbaa !12 + %36 = sext i32 %35 to i64 + %37 = getelementptr inbounds [33 x i64], [33 x i64]* @bit, i64 0, i64 %36 + %38 = load i64, i64* %37, align 8, !tbaa !5 + %39 = and i64 %38, %0 + %40 = icmp ne i64 %39, 0 + %41 = zext i1 %40 to i64 + %42 = or i64 %33, %41 + %43 = add nsw i64 %7, -1 + %44 = add nsw i64 %8, -1 + %45 = add nsw i64 %9, -1 + %46 = icmp ugt i64 %7, 1 + br i1 %46, label %6, label %47, !llvm.loop !30 + +47: ; preds = %6 + %48 = getelementptr inbounds %struct.GREAT, %struct.GREAT* %1, i64 0, i32 2 + %49 = load i64, i64* %48, align 8, !tbaa !25 + %50 = xor i64 %49, %22 + %51 = getelementptr inbounds %struct.GREAT, %struct.GREAT* %1, i64 0, i32 1 + %52 = load i64, i64* %51, align 8, !tbaa !27 + %53 = xor i64 %52, %32 + %54 = getelementptr inbounds %struct.GREAT, %struct.GREAT* %1, i64 0, i32 0 + %55 = load i64, i64* %54, align 8, !tbaa !28 + %56 = xor i64 %55, %42 + %57 = shl i64 %53, 16 + %58 = add i64 %57, %50 + %59 = shl i64 %56, 8 + %60 = lshr i64 %53, 8 + %61 = add i64 %59, %60 
+ br label %62 + +62: ; preds = %47, %62 + %63 = phi i64 [ 1, %47 ], [ %75, %62 ] + %64 = phi i64 [ 5, %47 ], [ %76, %62 ] + %65 = phi i64 [ %61, %47 ], [ %74, %62 ] + %66 = phi i64 [ %58, %47 ], [ %73, %62 ] + %67 = trunc i64 %66 to i8 + %68 = and i8 %67, 63 + %69 = getelementptr inbounds [9 x i8], [9 x i8]* %4, i64 0, i64 %63 + store i8 %68, i8* %69, align 1, !tbaa !17 + %70 = trunc i64 %65 to i8 + %71 = and i8 %70, 63 + %72 = getelementptr inbounds [9 x i8], [9 x i8]* %4, i64 0, i64 %64 + store i8 %71, i8* %72, align 1, !tbaa !17 + %73 = lshr i64 %66, 6 + %74 = lshr i64 %65, 6 + %75 = add nuw nsw i64 %63, 1 + %76 = add nuw nsw i64 %64, 1 + %77 = icmp eq i64 %75, 5 + br i1 %77, label %78, label %62, !llvm.loop !31 + +78: ; preds = %62, %78 + %79 = phi i64 [ %109, %78 ], [ 8, %62 ] + %80 = phi i64 [ %108, %78 ], [ 0, %62 ] + %81 = getelementptr inbounds [9 x i8], [9 x i8]* %4, i64 0, i64 %79 + %82 = load i8, i8* %81, align 1, !tbaa !17 + %83 = sext i8 %82 to i32 + %84 = shl nsw i32 %83, 1 + %85 = and i32 %84, 2 + %86 = lshr i32 %83, 5 + %87 = and i32 %86, 1 + %88 = or i32 %85, %87 + %89 = shl nsw i32 %83, 2 + %90 = and i32 %89, 8 + %91 = and i32 %83, 4 + %92 = or i32 %90, %91 + %93 = lshr i32 %83, 2 + %94 = and i32 %93, 2 + %95 = or i32 %92, %94 + %96 = lshr i32 %83, 4 + %97 = and i32 %96, 1 + %98 = or i32 %95, %97 + %99 = zext i32 %98 to i64 + %100 = zext i32 %88 to i64 + %101 = getelementptr inbounds [16 x [4 x [9 x i8]]], [16 x [4 x [9 x i8]]]* @cyfun.is, i64 0, i64 %99, i64 %100, i64 %79 + %102 = load i8, i8* %101, align 1, !tbaa !17 + %103 = shl i64 %80, 4 + %104 = sext i8 %102 to i64 + %105 = getelementptr inbounds [16 x i8], [16 x i8]* @cyfun.ibin, i64 0, i64 %104 + %106 = load i8, i8* %105, align 1, !tbaa !17 + %107 = sext i8 %106 to i64 + %108 = or i64 %103, %107 + %109 = add nsw i64 %79, -1 + %110 = icmp ugt i64 %79, 1 + br i1 %110, label %78, label %111, !llvm.loop !32 + +111: ; preds = %78 + store i64 0, i64* %2, align 8, !tbaa !5 + br label %112 + +112: ; preds = %111, %112 + %113 = phi i64 [ 32, %111 ], [ %125, %112 ] + %114 = load i64, i64* %2, align 8, !tbaa !5 + %115 = shl i64 %114, 1 + store i64 %115, i64* %2, align 8, !tbaa !5 + %116 = getelementptr inbounds [33 x i32], [33 x i32]* @cyfun.ipp, i64 0, i64 %113 + %117 = load i32, i32* %116, align 4, !tbaa !12 + %118 = sext i32 %117 to i64 + %119 = getelementptr inbounds [33 x i64], [33 x i64]* @bit, i64 0, i64 %118 + %120 = load i64, i64* %119, align 8, !tbaa !5 + %121 = and i64 %120, %108 + %122 = icmp ne i64 %121, 0 + %123 = zext i1 %122 to i64 + %124 = or i64 %115, %123 + store i64 %124, i64* %2, align 8, !tbaa !5 + %125 = add nsw i64 %113, -1 + %126 = icmp ugt i64 %113, 1 + br i1 %126, label %112, label %127, !llvm.loop !33 + +127: ; preds = %112 + call void @llvm.lifetime.end.p0i8(i64 9, i8* nonnull %5) #6 + ret void +} + +; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn +declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture) #1 + +; Function Attrs: nofree nosync nounwind sspstrong uwtable +define dso_local i32 @main() local_unnamed_addr #0 { + %1 = alloca %struct.IMMENSE, align 8 + %2 = alloca i32, align 4 + %3 = bitcast %struct.IMMENSE* %1 to i8* + call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %3) #6 + %4 = bitcast i32* %2 to i8* + call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %4) #6 + %5 = load i32, i32* @value, align 4, !tbaa !12 + store i32 %5, i32* %2, align 4, !tbaa !12 + call void @des(i64 35, i64 26, i64 2, i64 16, i32* nonnull %2, i32 %5, 
%struct.IMMENSE* nonnull %1) + call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %4) #6 + call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %3) #6 + ret i32 0 +} + +; Function Attrs: argmemonly nofree nounwind willreturn writeonly +declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg) #5 + +attributes #0 = { nofree nosync nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #1 = { argmemonly mustprogress nofree nosync nounwind willreturn } +attributes #2 = { mustprogress nofree norecurse nosync nounwind readonly sspstrong uwtable willreturn "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #3 = { argmemonly mustprogress nofree nounwind willreturn } +attributes #4 = { nofree norecurse nosync nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #5 = { argmemonly nofree nounwind willreturn writeonly } +attributes #6 = { nounwind } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = !{!6, !6, i64 0} +!6 = !{!"long", !7, i64 0} +!7 = !{!"omnipotent char", !8, i64 0} +!8 = !{!"Simple C/C++ TBAA"} +!9 = distinct !{!9, !10, !11} +!10 = !{!"llvm.loop.mustprogress"} +!11 = !{!"llvm.loop.unroll.disable"} +!12 = !{!13, !13, i64 0} +!13 = !{!"int", !7, i64 0} +!14 = !{!15, !6, i64 8} +!15 = !{!"IMMENSE", !6, i64 0, !6, i64 8} +!16 = !{!15, !6, i64 0} +!17 = !{!7, !7, i64 0} +!18 = distinct !{!18, !10, !11} +!19 = !{i64 0, i64 8, !5, i64 8, i64 8, !5, i64 16, i64 8, !5} +!20 = distinct !{!20, !10, !11} +!21 = distinct !{!21, !10, !11} +!22 = distinct !{!22, !10, !11} +!23 = distinct !{!23, !10, !11} +!24 = distinct !{!24, !10, !11} +!25 = !{!26, !6, i64 16} +!26 = !{!"GREAT", !6, i64 0, !6, i64 8, !6, i64 16} +!27 = !{!26, !6, i64 8} +!28 = !{!26, !6, i64 0} +!29 = distinct !{!29, !10, !11} +!30 = distinct !{!30, !10, !11} +!31 = distinct !{!31, !10, !11} +!32 = distinct !{!32, !10, !11} +!33 = distinct !{!33, !10, !11} diff --git a/test/ns.ll b/test/ns.ll new file mode 100644 index 0000000..4687299 --- /dev/null +++ b/test/ns.ll @@ -0,0 +1,133 @@ +; ModuleID = 'ns.c' +source_filename = "ns.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +@keys = dso_local local_unnamed_addr global [5 x [5 x [5 x [5 x i32]]]] [[5 x [5 x [5 x i32]]] zeroinitializer, [5 x [5 x [5 x i32]]] [[5 x [5 x i32]] [[5 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1], [5 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1], [5 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1], [5 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1], [5 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1]], [5 x [5 x i32]] [[5 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1], [5 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1], [5 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1], [5 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1], [5 x i32] [i32 1, i32 
1, i32 1, i32 1, i32 1]], [5 x [5 x i32]] [[5 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1], [5 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1], [5 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1], [5 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1], [5 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1]], [5 x [5 x i32]] [[5 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1], [5 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1], [5 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1], [5 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1], [5 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1]], [5 x [5 x i32]] [[5 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1], [5 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1], [5 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1], [5 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1], [5 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1]]], [5 x [5 x [5 x i32]]] [[5 x [5 x i32]] [[5 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2], [5 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2], [5 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2], [5 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2], [5 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2]], [5 x [5 x i32]] [[5 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2], [5 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2], [5 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2], [5 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2], [5 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2]], [5 x [5 x i32]] [[5 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2], [5 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2], [5 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2], [5 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2], [5 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2]], [5 x [5 x i32]] [[5 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2], [5 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2], [5 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2], [5 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2], [5 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2]], [5 x [5 x i32]] [[5 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2], [5 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2], [5 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2], [5 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2], [5 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2]]], [5 x [5 x [5 x i32]]] [[5 x [5 x i32]] [[5 x i32] [i32 3, i32 3, i32 3, i32 3, i32 3], [5 x i32] [i32 3, i32 3, i32 3, i32 3, i32 3], [5 x i32] [i32 3, i32 3, i32 3, i32 3, i32 3], [5 x i32] [i32 3, i32 3, i32 3, i32 3, i32 3], [5 x i32] [i32 3, i32 3, i32 3, i32 3, i32 3]], [5 x [5 x i32]] [[5 x i32] [i32 3, i32 3, i32 3, i32 3, i32 3], [5 x i32] [i32 3, i32 3, i32 3, i32 3, i32 3], [5 x i32] [i32 3, i32 3, i32 3, i32 3, i32 3], [5 x i32] [i32 3, i32 3, i32 3, i32 3, i32 3], [5 x i32] [i32 3, i32 3, i32 3, i32 3, i32 3]], [5 x [5 x i32]] [[5 x i32] [i32 3, i32 3, i32 3, i32 3, i32 3], [5 x i32] [i32 3, i32 3, i32 3, i32 3, i32 3], [5 x i32] [i32 3, i32 3, i32 3, i32 3, i32 3], [5 x i32] [i32 3, i32 3, i32 3, i32 3, i32 3], [5 x i32] [i32 3, i32 3, i32 3, i32 3, i32 3]], [5 x [5 x i32]] [[5 x i32] [i32 3, i32 3, i32 3, i32 3, i32 3], [5 x i32] [i32 3, i32 3, i32 3, i32 3, i32 3], [5 x i32] [i32 3, i32 3, i32 3, i32 3, i32 3], [5 x i32] [i32 3, i32 3, i32 3, i32 3, i32 3], [5 x i32] [i32 3, i32 3, i32 3, i32 3, i32 3]], [5 x [5 x i32]] [[5 x i32] [i32 3, i32 3, i32 3, i32 3, i32 3], [5 x i32] [i32 3, i32 3, i32 3, i32 3, i32 3], [5 x i32] [i32 3, i32 3, i32 3, i32 3, i32 3], [5 x i32] [i32 3, i32 3, i32 3, i32 3, i32 3], [5 x i32] [i32 3, i32 3, i32 3, i32 3, i32 3]]], [5 x [5 x [5 x i32]]] [[5 x [5 x i32]] [[5 x i32] [i32 4, i32 4, i32 4, i32 4, i32 4], [5 x i32] [i32 4, i32 4, i32 4, i32 4, i32 4], [5 x i32] [i32 4, i32 4, i32 4, i32 4, i32 4], [5 x i32] 
[i32 4, i32 4, i32 4, i32 4, i32 4], [5 x i32] [i32 4, i32 4, i32 4, i32 4, i32 4]], [5 x [5 x i32]] [[5 x i32] [i32 4, i32 4, i32 4, i32 4, i32 4], [5 x i32] [i32 4, i32 4, i32 4, i32 4, i32 4], [5 x i32] [i32 4, i32 4, i32 4, i32 4, i32 4], [5 x i32] [i32 4, i32 4, i32 4, i32 4, i32 4], [5 x i32] [i32 4, i32 4, i32 4, i32 4, i32 4]], [5 x [5 x i32]] [[5 x i32] [i32 4, i32 4, i32 4, i32 4, i32 4], [5 x i32] [i32 4, i32 4, i32 4, i32 4, i32 4], [5 x i32] [i32 4, i32 4, i32 4, i32 4, i32 4], [5 x i32] [i32 4, i32 4, i32 4, i32 4, i32 4], [5 x i32] [i32 4, i32 4, i32 4, i32 4, i32 4]], [5 x [5 x i32]] [[5 x i32] [i32 4, i32 4, i32 4, i32 4, i32 4], [5 x i32] [i32 4, i32 4, i32 4, i32 4, i32 4], [5 x i32] [i32 4, i32 4, i32 4, i32 4, i32 4], [5 x i32] [i32 4, i32 4, i32 4, i32 4, i32 4], [5 x i32] [i32 4, i32 4, i32 4, i32 4, i32 4]], [5 x [5 x i32]] [[5 x i32] [i32 4, i32 4, i32 4, i32 4, i32 4], [5 x i32] [i32 4, i32 4, i32 4, i32 4, i32 4], [5 x i32] [i32 4, i32 4, i32 4, i32 4, i32 4], [5 x i32] [i32 4, i32 4, i32 4, i32 4, i32 4], [5 x i32] [i32 4, i32 4, i32 4, i32 4, i32 401]]]], align 16 +@answer = dso_local local_unnamed_addr global [5 x [5 x [5 x [5 x i32]]]] [[5 x [5 x [5 x i32]]] [[5 x [5 x i32]] [[5 x i32] [i32 123, i32 123, i32 123, i32 123, i32 123], [5 x i32] [i32 123, i32 123, i32 123, i32 123, i32 123], [5 x i32] [i32 123, i32 123, i32 123, i32 123, i32 123], [5 x i32] [i32 123, i32 123, i32 123, i32 123, i32 123], [5 x i32] [i32 123, i32 123, i32 123, i32 123, i32 123]], [5 x [5 x i32]] [[5 x i32] [i32 123, i32 123, i32 123, i32 123, i32 123], [5 x i32] [i32 123, i32 123, i32 123, i32 123, i32 123], [5 x i32] [i32 123, i32 123, i32 123, i32 123, i32 123], [5 x i32] [i32 123, i32 123, i32 123, i32 123, i32 123], [5 x i32] [i32 123, i32 123, i32 123, i32 123, i32 123]], [5 x [5 x i32]] [[5 x i32] [i32 123, i32 123, i32 123, i32 123, i32 123], [5 x i32] [i32 123, i32 123, i32 123, i32 123, i32 123], [5 x i32] [i32 123, i32 123, i32 123, i32 123, i32 123], [5 x i32] [i32 123, i32 123, i32 123, i32 123, i32 123], [5 x i32] [i32 123, i32 123, i32 123, i32 123, i32 123]], [5 x [5 x i32]] [[5 x i32] [i32 123, i32 123, i32 123, i32 123, i32 123], [5 x i32] [i32 123, i32 123, i32 123, i32 123, i32 123], [5 x i32] [i32 123, i32 123, i32 123, i32 123, i32 123], [5 x i32] [i32 123, i32 123, i32 123, i32 123, i32 123], [5 x i32] [i32 123, i32 123, i32 123, i32 123, i32 123]], [5 x [5 x i32]] [[5 x i32] [i32 123, i32 123, i32 123, i32 123, i32 123], [5 x i32] [i32 123, i32 123, i32 123, i32 123, i32 123], [5 x i32] [i32 123, i32 123, i32 123, i32 123, i32 123], [5 x i32] [i32 123, i32 123, i32 123, i32 123, i32 123], [5 x i32] [i32 123, i32 123, i32 123, i32 123, i32 123]]], [5 x [5 x [5 x i32]]] [[5 x [5 x i32]] [[5 x i32] [i32 234, i32 234, i32 234, i32 234, i32 234], [5 x i32] [i32 234, i32 234, i32 234, i32 234, i32 234], [5 x i32] [i32 234, i32 234, i32 234, i32 234, i32 234], [5 x i32] [i32 234, i32 234, i32 234, i32 234, i32 234], [5 x i32] [i32 234, i32 234, i32 234, i32 234, i32 234]], [5 x [5 x i32]] [[5 x i32] [i32 234, i32 234, i32 234, i32 234, i32 234], [5 x i32] [i32 234, i32 234, i32 234, i32 234, i32 234], [5 x i32] [i32 234, i32 234, i32 234, i32 234, i32 234], [5 x i32] [i32 234, i32 234, i32 234, i32 234, i32 234], [5 x i32] [i32 234, i32 234, i32 234, i32 234, i32 234]], [5 x [5 x i32]] [[5 x i32] [i32 234, i32 234, i32 234, i32 234, i32 234], [5 x i32] [i32 234, i32 234, i32 234, i32 234, i32 234], [5 x i32] [i32 234, i32 234, i32 234, i32 234, i32 234], [5 x i32] 
[i32 234, i32 234, i32 234, i32 234, i32 234], [5 x i32] [i32 234, i32 234, i32 234, i32 234, i32 234]], [5 x [5 x i32]] [[5 x i32] [i32 234, i32 234, i32 234, i32 234, i32 234], [5 x i32] [i32 234, i32 234, i32 234, i32 234, i32 234], [5 x i32] [i32 234, i32 234, i32 234, i32 234, i32 234], [5 x i32] [i32 234, i32 234, i32 234, i32 234, i32 234], [5 x i32] [i32 234, i32 234, i32 234, i32 234, i32 234]], [5 x [5 x i32]] [[5 x i32] [i32 234, i32 234, i32 234, i32 234, i32 234], [5 x i32] [i32 234, i32 234, i32 234, i32 234, i32 234], [5 x i32] [i32 234, i32 234, i32 234, i32 234, i32 234], [5 x i32] [i32 234, i32 234, i32 234, i32 234, i32 234], [5 x i32] [i32 234, i32 234, i32 234, i32 234, i32 234]]], [5 x [5 x [5 x i32]]] [[5 x [5 x i32]] [[5 x i32] [i32 345, i32 345, i32 345, i32 345, i32 0], [5 x i32] [i32 345, i32 345, i32 345, i32 345, i32 0], [5 x i32] [i32 345, i32 345, i32 345, i32 345, i32 0], [5 x i32] [i32 345, i32 345, i32 345, i32 345, i32 0], [5 x i32] [i32 345, i32 345, i32 345, i32 345, i32 0]], [5 x [5 x i32]] [[5 x i32] [i32 345, i32 345, i32 345, i32 345, i32 0], [5 x i32] [i32 345, i32 345, i32 345, i32 345, i32 0], [5 x i32] [i32 345, i32 345, i32 345, i32 345, i32 0], [5 x i32] [i32 345, i32 345, i32 345, i32 345, i32 0], [5 x i32] [i32 345, i32 345, i32 345, i32 345, i32 0]], [5 x [5 x i32]] [[5 x i32] [i32 345, i32 345, i32 345, i32 345, i32 0], [5 x i32] [i32 345, i32 345, i32 345, i32 345, i32 0], [5 x i32] [i32 345, i32 345, i32 345, i32 345, i32 0], [5 x i32] [i32 345, i32 345, i32 345, i32 345, i32 0], [5 x i32] [i32 345, i32 345, i32 345, i32 345, i32 0]], [5 x [5 x i32]] [[5 x i32] [i32 345, i32 345, i32 345, i32 345, i32 0], [5 x i32] [i32 345, i32 345, i32 345, i32 345, i32 0], [5 x i32] [i32 345, i32 345, i32 345, i32 345, i32 0], [5 x i32] [i32 345, i32 345, i32 345, i32 345, i32 0], [5 x i32] [i32 345, i32 345, i32 345, i32 345, i32 0]], [5 x [5 x i32]] [[5 x i32] [i32 345, i32 345, i32 345, i32 345, i32 0], [5 x i32] [i32 345, i32 345, i32 345, i32 345, i32 0], [5 x i32] [i32 345, i32 345, i32 345, i32 345, i32 0], [5 x i32] [i32 345, i32 345, i32 345, i32 345, i32 0], [5 x i32] [i32 345, i32 345, i32 345, i32 345, i32 0]]], [5 x [5 x [5 x i32]]] [[5 x [5 x i32]] [[5 x i32] [i32 456, i32 456, i32 456, i32 456, i32 456], [5 x i32] [i32 456, i32 456, i32 456, i32 456, i32 456], [5 x i32] [i32 456, i32 456, i32 456, i32 456, i32 456], [5 x i32] [i32 456, i32 456, i32 456, i32 456, i32 456], [5 x i32] [i32 456, i32 456, i32 456, i32 456, i32 456]], [5 x [5 x i32]] [[5 x i32] [i32 456, i32 456, i32 456, i32 456, i32 456], [5 x i32] [i32 456, i32 456, i32 456, i32 456, i32 456], [5 x i32] [i32 456, i32 456, i32 456, i32 456, i32 456], [5 x i32] [i32 456, i32 456, i32 456, i32 456, i32 456], [5 x i32] [i32 456, i32 456, i32 456, i32 456, i32 456]], [5 x [5 x i32]] [[5 x i32] [i32 456, i32 456, i32 456, i32 456, i32 456], [5 x i32] [i32 456, i32 456, i32 456, i32 456, i32 456], [5 x i32] [i32 456, i32 456, i32 456, i32 456, i32 456], [5 x i32] [i32 456, i32 456, i32 456, i32 456, i32 456], [5 x i32] [i32 456, i32 456, i32 456, i32 456, i32 456]], [5 x [5 x i32]] [[5 x i32] [i32 456, i32 456, i32 456, i32 456, i32 456], [5 x i32] [i32 456, i32 456, i32 456, i32 456, i32 456], [5 x i32] [i32 456, i32 456, i32 456, i32 456, i32 456], [5 x i32] [i32 456, i32 456, i32 456, i32 456, i32 456], [5 x i32] [i32 456, i32 456, i32 456, i32 456, i32 456]], [5 x [5 x i32]] [[5 x i32] [i32 456, i32 456, i32 456, i32 456, i32 456], [5 x i32] [i32 456, i32 456, i32 456, i32 
456, i32 456], [5 x i32] [i32 456, i32 456, i32 456, i32 456, i32 456], [5 x i32] [i32 456, i32 456, i32 456, i32 456, i32 456], [5 x i32] [i32 456, i32 456, i32 456, i32 456, i32 456]]], [5 x [5 x [5 x i32]]] [[5 x [5 x i32]] [[5 x i32] [i32 567, i32 567, i32 567, i32 567, i32 567], [5 x i32] [i32 567, i32 567, i32 567, i32 567, i32 567], [5 x i32] [i32 567, i32 567, i32 567, i32 567, i32 567], [5 x i32] [i32 567, i32 567, i32 567, i32 567, i32 567], [5 x i32] [i32 567, i32 567, i32 567, i32 567, i32 567]], [5 x [5 x i32]] [[5 x i32] [i32 567, i32 567, i32 567, i32 567, i32 567], [5 x i32] [i32 567, i32 567, i32 567, i32 567, i32 567], [5 x i32] [i32 567, i32 567, i32 567, i32 567, i32 567], [5 x i32] [i32 567, i32 567, i32 567, i32 567, i32 567], [5 x i32] [i32 567, i32 567, i32 567, i32 567, i32 567]], [5 x [5 x i32]] [[5 x i32] [i32 567, i32 567, i32 567, i32 567, i32 567], [5 x i32] [i32 567, i32 567, i32 567, i32 567, i32 567], [5 x i32] [i32 567, i32 567, i32 567, i32 567, i32 567], [5 x i32] [i32 567, i32 567, i32 567, i32 567, i32 567], [5 x i32] [i32 567, i32 567, i32 567, i32 567, i32 567]], [5 x [5 x i32]] [[5 x i32] [i32 567, i32 567, i32 567, i32 567, i32 567], [5 x i32] [i32 567, i32 567, i32 567, i32 567, i32 567], [5 x i32] [i32 567, i32 567, i32 567, i32 567, i32 567], [5 x i32] [i32 567, i32 567, i32 567, i32 567, i32 567], [5 x i32] [i32 567, i32 567, i32 567, i32 567, i32 567]], [5 x [5 x i32]] [[5 x i32] [i32 567, i32 567, i32 567, i32 567, i32 567], [5 x i32] [i32 567, i32 567, i32 567, i32 567, i32 567], [5 x i32] [i32 567, i32 567, i32 567, i32 567, i32 567], [5 x i32] [i32 567, i32 567, i32 567, i32 567, i32 567], [5 x i32] [i32 567, i32 567, i32 567, i32 567, i32 1111]]]], align 16 + +; Function Attrs: nofree norecurse nosync nounwind readonly sspstrong uwtable +define dso_local i32 @foo(i32 %0) local_unnamed_addr #0 { + br label %2 + +2: ; preds = %1, %30 + %3 = phi i64 [ 0, %1 ], [ %31, %30 ] + br label %4 + +4: ; preds = %2, %27 + %5 = phi i64 [ 0, %2 ], [ %28, %27 ] + br label %6 + +6: ; preds = %4, %24 + %7 = phi i64 [ 0, %4 ], [ %25, %24 ] + br label %8 + +8: ; preds = %6, %21 + %9 = phi i64 [ 0, %6 ], [ %22, %21 ] + %10 = getelementptr inbounds [5 x [5 x [5 x [5 x i32]]]], [5 x [5 x [5 x [5 x i32]]]]* @keys, i64 0, i64 %3, i64 %5, i64 %7, i64 %9 + %11 = load i32, i32* %10, align 4, !tbaa !5 + %12 = icmp eq i32 %11, %0 + br i1 %12, label %13, label %21 + +13: ; preds = %8 + %14 = and i64 %3, 4294967295 + %15 = and i64 %5, 4294967295 + %16 = and i64 %7, 4294967295 + %17 = and i64 %9, 4294967295 + %18 = getelementptr inbounds [5 x [5 x [5 x [5 x i32]]]], [5 x [5 x [5 x [5 x i32]]]]* @answer, i64 0, i64 %14, i64 %15, i64 %16, i64 %17 + %19 = load i32, i32* %18, align 4, !tbaa !5 + %20 = add nsw i32 %19, %11 + br label %33 + +21: ; preds = %8 + %22 = add nuw nsw i64 %9, 1 + %23 = icmp eq i64 %22, 5 + br i1 %23, label %24, label %8, !llvm.loop !9 + +24: ; preds = %21 + %25 = add nuw nsw i64 %7, 1 + %26 = icmp eq i64 %25, 5 + br i1 %26, label %27, label %6, !llvm.loop !12 + +27: ; preds = %24 + %28 = add nuw nsw i64 %5, 1 + %29 = icmp eq i64 %28, 5 + br i1 %29, label %30, label %4, !llvm.loop !13 + +30: ; preds = %27 + %31 = add nuw nsw i64 %3, 1 + %32 = icmp eq i64 %31, 5 + br i1 %32, label %33, label %2, !llvm.loop !14 + +33: ; preds = %30, %13 + %34 = phi i32 [ %20, %13 ], [ -1, %30 ] + ret i32 %34 +} + +; Function Attrs: nofree norecurse nosync nounwind readonly sspstrong uwtable +define dso_local void @main() local_unnamed_addr #0 { + br label %1 + +1: ; 
preds = %21, %0 + %2 = phi i64 [ 0, %0 ], [ %22, %21 ] + br label %3 + +3: ; preds = %18, %1 + %4 = phi i64 [ 0, %1 ], [ %19, %18 ] + br label %5 + +5: ; preds = %15, %3 + %6 = phi i64 [ 0, %3 ], [ %16, %15 ] + br label %7 + +7: ; preds = %12, %5 + %8 = phi i64 [ 0, %5 ], [ %13, %12 ] + %9 = getelementptr inbounds [5 x [5 x [5 x [5 x i32]]]], [5 x [5 x [5 x [5 x i32]]]]* @keys, i64 0, i64 %2, i64 %4, i64 %6, i64 %8 + %10 = load i32, i32* %9, align 4, !tbaa !5 + %11 = icmp eq i32 %10, 400 + br i1 %11, label %24, label %12 + +12: ; preds = %7 + %13 = add nuw nsw i64 %8, 1 + %14 = icmp eq i64 %13, 5 + br i1 %14, label %15, label %7, !llvm.loop !9 + +15: ; preds = %12 + %16 = add nuw nsw i64 %6, 1 + %17 = icmp eq i64 %16, 5 + br i1 %17, label %18, label %5, !llvm.loop !12 + +18: ; preds = %15 + %19 = add nuw nsw i64 %4, 1 + %20 = icmp eq i64 %19, 5 + br i1 %20, label %21, label %3, !llvm.loop !13 + +21: ; preds = %18 + %22 = add nuw nsw i64 %2, 1 + %23 = icmp eq i64 %22, 5 + br i1 %23, label %24, label %1, !llvm.loop !14 + +24: ; preds = %21, %7 + ret void +} + +attributes #0 = { nofree norecurse nosync nounwind readonly sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = !{!6, !6, i64 0} +!6 = !{!"int", !7, i64 0} +!7 = !{!"omnipotent char", !8, i64 0} +!8 = !{!"Simple C/C++ TBAA"} +!9 = distinct !{!9, !10, !11} +!10 = !{!"llvm.loop.mustprogress"} +!11 = !{!"llvm.loop.unroll.disable"} +!12 = distinct !{!12, !10, !11} +!13 = distinct !{!13, !10, !11} +!14 = distinct !{!14, !10, !11} diff --git a/test/nsichneu.ll b/test/nsichneu.ll new file mode 100644 index 0000000..accef65 --- /dev/null +++ b/test/nsichneu.ll @@ -0,0 +1,6705 @@ +; ModuleID = 'nsichneu.c' +source_filename = "nsichneu.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +@P1_is_marked = dso_local global i32 3, align 4 +@P2_is_marked = dso_local global i32 5, align 4 +@P3_is_marked = dso_local global i32 0, align 4 +@P1_marking_member_0 = dso_local global [3 x i64] zeroinitializer, align 16 +@P3_marking_member_0 = dso_local global [6 x i64] zeroinitializer, align 16 +@P2_marking_member_0 = dso_local global [5 x i64] zeroinitializer, align 16 + +; Function Attrs: nofree norecurse nounwind sspstrong uwtable +define dso_local i32 @main() local_unnamed_addr #0 { + br label %1 + +1: ; preds = %0, %4461 + %2 = phi i32 [ 2, %0 ], [ %3, %4461 ] + %3 = add nsw i32 %2, -1 + %4 = load volatile i32, i32* @P1_is_marked, align 4, !tbaa !5 + %5 = icmp sgt i32 %4, 2 + br i1 %5, label %6, label %34 + +6: ; preds = %1 + %7 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %8 = icmp slt i32 %7, 4 + br i1 %8, label %9, label %34 + +9: ; preds = %6 + %10 = load volatile i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @P1_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %11 = load volatile i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @P1_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %12 = icmp eq i64 %10, %11 + br i1 %12, label %13, label %34 + +13: ; preds = %9 + %14 = load volatile i64, i64* getelementptr 
inbounds ([3 x i64], [3 x i64]* @P1_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %15 = load volatile i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @P1_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %16 = icmp slt i64 %14, %15 + br i1 %16, label %17, label %34 + +17: ; preds = %13 + %18 = load volatile i32, i32* @P1_is_marked, align 4, !tbaa !5 + %19 = add nsw i32 %18, -3 + store volatile i32 %19, i32* @P1_is_marked, align 4, !tbaa !5 + %20 = sub nsw i64 %14, %15 + %21 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %22 = sext i32 %21 to i64 + %23 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %22 + store volatile i64 %14, i64* %23, align 8, !tbaa !9 + %24 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %25 = add nsw i32 %24, 1 + %26 = sext i32 %25 to i64 + %27 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %26 + store volatile i64 %15, i64* %27, align 8, !tbaa !9 + %28 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %29 = add nsw i32 %28, 2 + %30 = sext i32 %29 to i64 + %31 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %30 + store volatile i64 %20, i64* %31, align 8, !tbaa !9 + %32 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %33 = add nsw i32 %32, 3 + store volatile i32 %33, i32* @P3_is_marked, align 4, !tbaa !5 + br label %34 + +34: ; preds = %13, %17, %9, %6, %1 + %35 = load volatile i32, i32* @P1_is_marked, align 4, !tbaa !5 + %36 = icmp sgt i32 %35, 2 + br i1 %36, label %37, label %65 + +37: ; preds = %34 + %38 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %39 = icmp slt i32 %38, 4 + br i1 %39, label %40, label %65 + +40: ; preds = %37 + %41 = load volatile i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @P1_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %42 = load volatile i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @P1_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %43 = icmp eq i64 %41, %42 + br i1 %43, label %44, label %65 + +44: ; preds = %40 + %45 = load volatile i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @P1_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %46 = load volatile i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @P1_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %47 = icmp slt i64 %45, %46 + br i1 %47, label %48, label %65 + +48: ; preds = %44 + %49 = load volatile i32, i32* @P1_is_marked, align 4, !tbaa !5 + %50 = add nsw i32 %49, -3 + store volatile i32 %50, i32* @P1_is_marked, align 4, !tbaa !5 + %51 = sub nsw i64 %45, %46 + %52 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %53 = sext i32 %52 to i64 + %54 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %53 + store volatile i64 %45, i64* %54, align 8, !tbaa !9 + %55 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %56 = add nsw i32 %55, 1 + %57 = sext i32 %56 to i64 + %58 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %57 + store volatile i64 %46, i64* %58, align 8, !tbaa !9 + %59 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %60 = add nsw i32 %59, 2 + %61 = sext i32 %60 to i64 + %62 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %61 + store volatile i64 %51, i64* %62, align 8, !tbaa !9 + %63 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %64 = add nsw i32 %63, 3 + store volatile i32 %64, i32* @P3_is_marked, 
align 4, !tbaa !5 + br label %65 + +65: ; preds = %44, %48, %40, %37, %34 + %66 = load volatile i32, i32* @P1_is_marked, align 4, !tbaa !5 + %67 = icmp sgt i32 %66, 2 + br i1 %67, label %68, label %96 + +68: ; preds = %65 + %69 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %70 = icmp slt i32 %69, 4 + br i1 %70, label %71, label %96 + +71: ; preds = %68 + %72 = load volatile i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @P1_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %73 = load volatile i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @P1_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %74 = icmp eq i64 %72, %73 + br i1 %74, label %75, label %96 + +75: ; preds = %71 + %76 = load volatile i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @P1_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %77 = load volatile i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @P1_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %78 = icmp slt i64 %76, %77 + br i1 %78, label %79, label %96 + +79: ; preds = %75 + %80 = load volatile i32, i32* @P1_is_marked, align 4, !tbaa !5 + %81 = add nsw i32 %80, -3 + store volatile i32 %81, i32* @P1_is_marked, align 4, !tbaa !5 + %82 = sub nsw i64 %76, %77 + %83 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %84 = sext i32 %83 to i64 + %85 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %84 + store volatile i64 %76, i64* %85, align 8, !tbaa !9 + %86 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %87 = add nsw i32 %86, 1 + %88 = sext i32 %87 to i64 + %89 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %88 + store volatile i64 %77, i64* %89, align 8, !tbaa !9 + %90 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %91 = add nsw i32 %90, 2 + %92 = sext i32 %91 to i64 + %93 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %92 + store volatile i64 %82, i64* %93, align 8, !tbaa !9 + %94 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %95 = add nsw i32 %94, 3 + store volatile i32 %95, i32* @P3_is_marked, align 4, !tbaa !5 + br label %96 + +96: ; preds = %75, %79, %71, %68, %65 + %97 = load volatile i32, i32* @P1_is_marked, align 4, !tbaa !5 + %98 = icmp sgt i32 %97, 2 + br i1 %98, label %99, label %127 + +99: ; preds = %96 + %100 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %101 = icmp slt i32 %100, 4 + br i1 %101, label %102, label %127 + +102: ; preds = %99 + %103 = load volatile i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @P1_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %104 = load volatile i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @P1_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %105 = icmp eq i64 %103, %104 + br i1 %105, label %106, label %127 + +106: ; preds = %102 + %107 = load volatile i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @P1_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %108 = load volatile i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @P1_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %109 = icmp slt i64 %107, %108 + br i1 %109, label %110, label %127 + +110: ; preds = %106 + %111 = load volatile i32, i32* @P1_is_marked, align 4, !tbaa !5 + %112 = add nsw i32 %111, -3 + store volatile i32 %112, i32* @P1_is_marked, align 4, !tbaa !5 + %113 = sub nsw i64 %107, %108 + %114 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %115 = sext i32 %114 to i64 
+ %116 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %115 + store volatile i64 %107, i64* %116, align 8, !tbaa !9 + %117 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %118 = add nsw i32 %117, 1 + %119 = sext i32 %118 to i64 + %120 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %119 + store volatile i64 %108, i64* %120, align 8, !tbaa !9 + %121 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %122 = add nsw i32 %121, 2 + %123 = sext i32 %122 to i64 + %124 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %123 + store volatile i64 %113, i64* %124, align 8, !tbaa !9 + %125 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %126 = add nsw i32 %125, 3 + store volatile i32 %126, i32* @P3_is_marked, align 4, !tbaa !5 + br label %127 + +127: ; preds = %106, %110, %102, %99, %96 + %128 = load volatile i32, i32* @P1_is_marked, align 4, !tbaa !5 + %129 = icmp sgt i32 %128, 2 + br i1 %129, label %130, label %158 + +130: ; preds = %127 + %131 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %132 = icmp slt i32 %131, 4 + br i1 %132, label %133, label %158 + +133: ; preds = %130 + %134 = load volatile i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @P1_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %135 = load volatile i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @P1_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %136 = icmp eq i64 %134, %135 + br i1 %136, label %137, label %158 + +137: ; preds = %133 + %138 = load volatile i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @P1_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %139 = load volatile i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @P1_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %140 = icmp slt i64 %138, %139 + br i1 %140, label %141, label %158 + +141: ; preds = %137 + %142 = load volatile i32, i32* @P1_is_marked, align 4, !tbaa !5 + %143 = add nsw i32 %142, -3 + store volatile i32 %143, i32* @P1_is_marked, align 4, !tbaa !5 + %144 = sub nsw i64 %138, %139 + %145 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %146 = sext i32 %145 to i64 + %147 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %146 + store volatile i64 %138, i64* %147, align 8, !tbaa !9 + %148 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %149 = add nsw i32 %148, 1 + %150 = sext i32 %149 to i64 + %151 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %150 + store volatile i64 %139, i64* %151, align 8, !tbaa !9 + %152 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %153 = add nsw i32 %152, 2 + %154 = sext i32 %153 to i64 + %155 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %154 + store volatile i64 %144, i64* %155, align 8, !tbaa !9 + %156 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %157 = add nsw i32 %156, 3 + store volatile i32 %157, i32* @P3_is_marked, align 4, !tbaa !5 + br label %158 + +158: ; preds = %137, %141, %133, %130, %127 + %159 = load volatile i32, i32* @P1_is_marked, align 4, !tbaa !5 + %160 = icmp sgt i32 %159, 2 + br i1 %160, label %161, label %189 + +161: ; preds = %158 + %162 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %163 = icmp slt i32 %162, 4 + br i1 %163, label %164, label %189 + +164: ; preds = %161 + %165 = load volatile i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* 
@P1_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %166 = load volatile i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @P1_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %167 = icmp eq i64 %165, %166 + br i1 %167, label %168, label %189 + +168: ; preds = %164 + %169 = load volatile i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @P1_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %170 = load volatile i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @P1_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %171 = icmp slt i64 %169, %170 + br i1 %171, label %172, label %189 + +172: ; preds = %168 + %173 = load volatile i32, i32* @P1_is_marked, align 4, !tbaa !5 + %174 = add nsw i32 %173, -3 + store volatile i32 %174, i32* @P1_is_marked, align 4, !tbaa !5 + %175 = sub nsw i64 %169, %170 + %176 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %177 = sext i32 %176 to i64 + %178 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %177 + store volatile i64 %169, i64* %178, align 8, !tbaa !9 + %179 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %180 = add nsw i32 %179, 1 + %181 = sext i32 %180 to i64 + %182 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %181 + store volatile i64 %170, i64* %182, align 8, !tbaa !9 + %183 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %184 = add nsw i32 %183, 2 + %185 = sext i32 %184 to i64 + %186 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %185 + store volatile i64 %175, i64* %186, align 8, !tbaa !9 + %187 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %188 = add nsw i32 %187, 3 + store volatile i32 %188, i32* @P3_is_marked, align 4, !tbaa !5 + br label %189 + +189: ; preds = %168, %172, %164, %161, %158 + %190 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %191 = icmp sgt i32 %190, 3 + br i1 %191, label %192, label %224 + +192: ; preds = %189 + %193 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %194 = icmp slt i32 %193, 4 + br i1 %194, label %195, label %224 + +195: ; preds = %192 + %196 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %197 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %198 = icmp eq i64 %196, %197 + br i1 %198, label %199, label %224 + +199: ; preds = %195 + %200 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %201 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %202 = icmp eq i64 %200, %201 + br i1 %202, label %203, label %224 + +203: ; preds = %199 + %204 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %205 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %206 = icmp sgt i64 %205, %204 + br i1 %206, label %207, label %224 + +207: ; preds = %203 + %208 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %209 = add nsw i32 %208, -4 + store volatile i32 %209, i32* @P2_is_marked, align 4, !tbaa !5 + %210 = add nsw i64 %205, %204 + %211 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %212 = sext i32 %211 to i64 + %213 = 
getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %212 + store volatile i64 %204, i64* %213, align 8, !tbaa !9 + %214 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %215 = add nsw i32 %214, 1 + %216 = sext i32 %215 to i64 + %217 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %216 + store volatile i64 %205, i64* %217, align 8, !tbaa !9 + %218 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %219 = add nsw i32 %218, 2 + %220 = sext i32 %219 to i64 + %221 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %220 + store volatile i64 %210, i64* %221, align 8, !tbaa !9 + %222 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %223 = add nsw i32 %222, 3 + store volatile i32 %223, i32* @P3_is_marked, align 4, !tbaa !5 + br label %224 + +224: ; preds = %203, %207, %199, %195, %192, %189 + %225 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %226 = icmp sgt i32 %225, 3 + br i1 %226, label %227, label %259 + +227: ; preds = %224 + %228 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %229 = icmp slt i32 %228, 4 + br i1 %229, label %230, label %259 + +230: ; preds = %227 + %231 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %232 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %233 = icmp eq i64 %231, %232 + br i1 %233, label %234, label %259 + +234: ; preds = %230 + %235 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %236 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %237 = icmp eq i64 %235, %236 + br i1 %237, label %238, label %259 + +238: ; preds = %234 + %239 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %240 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %241 = icmp sgt i64 %240, %239 + br i1 %241, label %242, label %259 + +242: ; preds = %238 + %243 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %244 = add nsw i32 %243, -4 + store volatile i32 %244, i32* @P2_is_marked, align 4, !tbaa !5 + %245 = add nsw i64 %240, %239 + %246 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %247 = sext i32 %246 to i64 + %248 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %247 + store volatile i64 %239, i64* %248, align 8, !tbaa !9 + %249 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %250 = add nsw i32 %249, 1 + %251 = sext i32 %250 to i64 + %252 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %251 + store volatile i64 %240, i64* %252, align 8, !tbaa !9 + %253 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %254 = add nsw i32 %253, 2 + %255 = sext i32 %254 to i64 + %256 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %255 + store volatile i64 %245, i64* %256, align 8, !tbaa !9 + %257 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %258 = add nsw i32 %257, 3 + store volatile i32 %258, i32* @P3_is_marked, align 4, !tbaa !5 + br label %259 + +259: ; preds = %238, %242, %234, %230, %227, %224 + %260 = load volatile i32, i32* 
@P2_is_marked, align 4, !tbaa !5 + %261 = icmp sgt i32 %260, 3 + br i1 %261, label %262, label %294 + +262: ; preds = %259 + %263 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %264 = icmp slt i32 %263, 4 + br i1 %264, label %265, label %294 + +265: ; preds = %262 + %266 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %267 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %268 = icmp eq i64 %266, %267 + br i1 %268, label %269, label %294 + +269: ; preds = %265 + %270 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %271 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %272 = icmp eq i64 %270, %271 + br i1 %272, label %273, label %294 + +273: ; preds = %269 + %274 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %275 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %276 = icmp sgt i64 %275, %274 + br i1 %276, label %277, label %294 + +277: ; preds = %273 + %278 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %279 = add nsw i32 %278, -4 + store volatile i32 %279, i32* @P2_is_marked, align 4, !tbaa !5 + %280 = add nsw i64 %275, %274 + %281 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %282 = sext i32 %281 to i64 + %283 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %282 + store volatile i64 %274, i64* %283, align 8, !tbaa !9 + %284 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %285 = add nsw i32 %284, 1 + %286 = sext i32 %285 to i64 + %287 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %286 + store volatile i64 %275, i64* %287, align 8, !tbaa !9 + %288 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %289 = add nsw i32 %288, 2 + %290 = sext i32 %289 to i64 + %291 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %290 + store volatile i64 %280, i64* %291, align 8, !tbaa !9 + %292 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %293 = add nsw i32 %292, 3 + store volatile i32 %293, i32* @P3_is_marked, align 4, !tbaa !5 + br label %294 + +294: ; preds = %273, %277, %269, %265, %262, %259 + %295 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %296 = icmp sgt i32 %295, 3 + br i1 %296, label %297, label %329 + +297: ; preds = %294 + %298 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %299 = icmp slt i32 %298, 4 + br i1 %299, label %300, label %329 + +300: ; preds = %297 + %301 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %302 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %303 = icmp eq i64 %301, %302 + br i1 %303, label %304, label %329 + +304: ; preds = %300 + %305 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %306 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %307 = icmp eq i64 %305, %306 + br 
i1 %307, label %308, label %329 + +308: ; preds = %304 + %309 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %310 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %311 = icmp sgt i64 %310, %309 + br i1 %311, label %312, label %329 + +312: ; preds = %308 + %313 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %314 = add nsw i32 %313, -4 + store volatile i32 %314, i32* @P2_is_marked, align 4, !tbaa !5 + %315 = add nsw i64 %310, %309 + %316 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %317 = sext i32 %316 to i64 + %318 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %317 + store volatile i64 %309, i64* %318, align 8, !tbaa !9 + %319 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %320 = add nsw i32 %319, 1 + %321 = sext i32 %320 to i64 + %322 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %321 + store volatile i64 %310, i64* %322, align 8, !tbaa !9 + %323 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %324 = add nsw i32 %323, 2 + %325 = sext i32 %324 to i64 + %326 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %325 + store volatile i64 %315, i64* %326, align 8, !tbaa !9 + %327 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %328 = add nsw i32 %327, 3 + store volatile i32 %328, i32* @P3_is_marked, align 4, !tbaa !5 + br label %329 + +329: ; preds = %308, %312, %304, %300, %297, %294 + %330 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %331 = icmp sgt i32 %330, 3 + br i1 %331, label %332, label %364 + +332: ; preds = %329 + %333 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %334 = icmp slt i32 %333, 4 + br i1 %334, label %335, label %364 + +335: ; preds = %332 + %336 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %337 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %338 = icmp eq i64 %336, %337 + br i1 %338, label %339, label %364 + +339: ; preds = %335 + %340 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %341 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %342 = icmp eq i64 %340, %341 + br i1 %342, label %343, label %364 + +343: ; preds = %339 + %344 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %345 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %346 = icmp sgt i64 %345, %344 + br i1 %346, label %347, label %364 + +347: ; preds = %343 + %348 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %349 = add nsw i32 %348, -4 + store volatile i32 %349, i32* @P2_is_marked, align 4, !tbaa !5 + %350 = add nsw i64 %345, %344 + %351 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %352 = sext i32 %351 to i64 + %353 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %352 + store volatile i64 %344, i64* %353, align 8, !tbaa !9 + %354 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %355 = add nsw i32 %354, 
1 + %356 = sext i32 %355 to i64 + %357 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %356 + store volatile i64 %345, i64* %357, align 8, !tbaa !9 + %358 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %359 = add nsw i32 %358, 2 + %360 = sext i32 %359 to i64 + %361 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %360 + store volatile i64 %350, i64* %361, align 8, !tbaa !9 + %362 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %363 = add nsw i32 %362, 3 + store volatile i32 %363, i32* @P3_is_marked, align 4, !tbaa !5 + br label %364 + +364: ; preds = %343, %347, %339, %335, %332, %329 + %365 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %366 = icmp sgt i32 %365, 3 + br i1 %366, label %367, label %399 + +367: ; preds = %364 + %368 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %369 = icmp slt i32 %368, 4 + br i1 %369, label %370, label %399 + +370: ; preds = %367 + %371 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %372 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %373 = icmp eq i64 %371, %372 + br i1 %373, label %374, label %399 + +374: ; preds = %370 + %375 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %376 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %377 = icmp eq i64 %375, %376 + br i1 %377, label %378, label %399 + +378: ; preds = %374 + %379 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %380 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %381 = icmp sgt i64 %380, %379 + br i1 %381, label %382, label %399 + +382: ; preds = %378 + %383 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %384 = add nsw i32 %383, -4 + store volatile i32 %384, i32* @P2_is_marked, align 4, !tbaa !5 + %385 = add nsw i64 %380, %379 + %386 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %387 = sext i32 %386 to i64 + %388 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %387 + store volatile i64 %379, i64* %388, align 8, !tbaa !9 + %389 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %390 = add nsw i32 %389, 1 + %391 = sext i32 %390 to i64 + %392 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %391 + store volatile i64 %380, i64* %392, align 8, !tbaa !9 + %393 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %394 = add nsw i32 %393, 2 + %395 = sext i32 %394 to i64 + %396 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %395 + store volatile i64 %385, i64* %396, align 8, !tbaa !9 + %397 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %398 = add nsw i32 %397, 3 + store volatile i32 %398, i32* @P3_is_marked, align 4, !tbaa !5 + br label %399 + +399: ; preds = %378, %382, %374, %370, %367, %364 + %400 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %401 = icmp sgt i32 %400, 3 + br i1 %401, label %402, label %434 + +402: ; preds = %399 + %403 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %404 = icmp slt i32 %403, 4 + br i1 %404, 
label %405, label %434 + +405: ; preds = %402 + %406 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %407 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %408 = icmp eq i64 %406, %407 + br i1 %408, label %409, label %434 + +409: ; preds = %405 + %410 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %411 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %412 = icmp eq i64 %410, %411 + br i1 %412, label %413, label %434 + +413: ; preds = %409 + %414 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %415 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %416 = icmp sgt i64 %415, %414 + br i1 %416, label %417, label %434 + +417: ; preds = %413 + %418 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %419 = add nsw i32 %418, -4 + store volatile i32 %419, i32* @P2_is_marked, align 4, !tbaa !5 + %420 = add nsw i64 %415, %414 + %421 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %422 = sext i32 %421 to i64 + %423 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %422 + store volatile i64 %414, i64* %423, align 8, !tbaa !9 + %424 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %425 = add nsw i32 %424, 1 + %426 = sext i32 %425 to i64 + %427 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %426 + store volatile i64 %415, i64* %427, align 8, !tbaa !9 + %428 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %429 = add nsw i32 %428, 2 + %430 = sext i32 %429 to i64 + %431 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %430 + store volatile i64 %420, i64* %431, align 8, !tbaa !9 + %432 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %433 = add nsw i32 %432, 3 + store volatile i32 %433, i32* @P3_is_marked, align 4, !tbaa !5 + br label %434 + +434: ; preds = %413, %417, %409, %405, %402, %399 + %435 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %436 = icmp sgt i32 %435, 3 + br i1 %436, label %437, label %469 + +437: ; preds = %434 + %438 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %439 = icmp slt i32 %438, 4 + br i1 %439, label %440, label %469 + +440: ; preds = %437 + %441 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %442 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %443 = icmp eq i64 %441, %442 + br i1 %443, label %444, label %469 + +444: ; preds = %440 + %445 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %446 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %447 = icmp eq i64 %445, %446 + br i1 %447, label %448, label %469 + +448: ; preds = %444 + %449 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %450 = load volatile i64, i64* 
getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %451 = icmp sgt i64 %450, %449 + br i1 %451, label %452, label %469 + +452: ; preds = %448 + %453 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %454 = add nsw i32 %453, -4 + store volatile i32 %454, i32* @P2_is_marked, align 4, !tbaa !5 + %455 = add nsw i64 %450, %449 + %456 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %457 = sext i32 %456 to i64 + %458 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %457 + store volatile i64 %449, i64* %458, align 8, !tbaa !9 + %459 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %460 = add nsw i32 %459, 1 + %461 = sext i32 %460 to i64 + %462 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %461 + store volatile i64 %450, i64* %462, align 8, !tbaa !9 + %463 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %464 = add nsw i32 %463, 2 + %465 = sext i32 %464 to i64 + %466 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %465 + store volatile i64 %455, i64* %466, align 8, !tbaa !9 + %467 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %468 = add nsw i32 %467, 3 + store volatile i32 %468, i32* @P3_is_marked, align 4, !tbaa !5 + br label %469 + +469: ; preds = %448, %452, %444, %440, %437, %434 + %470 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %471 = icmp sgt i32 %470, 3 + br i1 %471, label %472, label %504 + +472: ; preds = %469 + %473 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %474 = icmp slt i32 %473, 4 + br i1 %474, label %475, label %504 + +475: ; preds = %472 + %476 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %477 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %478 = icmp eq i64 %476, %477 + br i1 %478, label %479, label %504 + +479: ; preds = %475 + %480 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %481 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %482 = icmp eq i64 %480, %481 + br i1 %482, label %483, label %504 + +483: ; preds = %479 + %484 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %485 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %486 = icmp sgt i64 %485, %484 + br i1 %486, label %487, label %504 + +487: ; preds = %483 + %488 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %489 = add nsw i32 %488, -4 + store volatile i32 %489, i32* @P2_is_marked, align 4, !tbaa !5 + %490 = add nsw i64 %485, %484 + %491 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %492 = sext i32 %491 to i64 + %493 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %492 + store volatile i64 %484, i64* %493, align 8, !tbaa !9 + %494 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %495 = add nsw i32 %494, 1 + %496 = sext i32 %495 to i64 + %497 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %496 + store volatile i64 %485, i64* %497, align 8, !tbaa !9 + %498 = load volatile i32, i32* 
@P3_is_marked, align 4, !tbaa !5 + %499 = add nsw i32 %498, 2 + %500 = sext i32 %499 to i64 + %501 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %500 + store volatile i64 %490, i64* %501, align 8, !tbaa !9 + %502 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %503 = add nsw i32 %502, 3 + store volatile i32 %503, i32* @P3_is_marked, align 4, !tbaa !5 + br label %504 + +504: ; preds = %483, %487, %479, %475, %472, %469 + %505 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %506 = icmp sgt i32 %505, 3 + br i1 %506, label %507, label %539 + +507: ; preds = %504 + %508 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %509 = icmp slt i32 %508, 4 + br i1 %509, label %510, label %539 + +510: ; preds = %507 + %511 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %512 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %513 = icmp eq i64 %511, %512 + br i1 %513, label %514, label %539 + +514: ; preds = %510 + %515 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %516 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %517 = icmp eq i64 %515, %516 + br i1 %517, label %518, label %539 + +518: ; preds = %514 + %519 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %520 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %521 = icmp sgt i64 %520, %519 + br i1 %521, label %522, label %539 + +522: ; preds = %518 + %523 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %524 = add nsw i32 %523, -4 + store volatile i32 %524, i32* @P2_is_marked, align 4, !tbaa !5 + %525 = add nsw i64 %520, %519 + %526 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %527 = sext i32 %526 to i64 + %528 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %527 + store volatile i64 %519, i64* %528, align 8, !tbaa !9 + %529 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %530 = add nsw i32 %529, 1 + %531 = sext i32 %530 to i64 + %532 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %531 + store volatile i64 %520, i64* %532, align 8, !tbaa !9 + %533 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %534 = add nsw i32 %533, 2 + %535 = sext i32 %534 to i64 + %536 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %535 + store volatile i64 %525, i64* %536, align 8, !tbaa !9 + %537 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %538 = add nsw i32 %537, 3 + store volatile i32 %538, i32* @P3_is_marked, align 4, !tbaa !5 + br label %539 + +539: ; preds = %518, %522, %514, %510, %507, %504 + %540 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %541 = icmp sgt i32 %540, 3 + br i1 %541, label %542, label %574 + +542: ; preds = %539 + %543 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %544 = icmp slt i32 %543, 4 + br i1 %544, label %545, label %574 + +545: ; preds = %542 + %546 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %547 = load volatile i64, 
i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %548 = icmp eq i64 %546, %547 + br i1 %548, label %549, label %574 + +549: ; preds = %545 + %550 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %551 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %552 = icmp eq i64 %550, %551 + br i1 %552, label %553, label %574 + +553: ; preds = %549 + %554 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %555 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %556 = icmp sgt i64 %555, %554 + br i1 %556, label %557, label %574 + +557: ; preds = %553 + %558 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %559 = add nsw i32 %558, -4 + store volatile i32 %559, i32* @P2_is_marked, align 4, !tbaa !5 + %560 = add nsw i64 %555, %554 + %561 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %562 = sext i32 %561 to i64 + %563 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %562 + store volatile i64 %554, i64* %563, align 8, !tbaa !9 + %564 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %565 = add nsw i32 %564, 1 + %566 = sext i32 %565 to i64 + %567 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %566 + store volatile i64 %555, i64* %567, align 8, !tbaa !9 + %568 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %569 = add nsw i32 %568, 2 + %570 = sext i32 %569 to i64 + %571 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %570 + store volatile i64 %560, i64* %571, align 8, !tbaa !9 + %572 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %573 = add nsw i32 %572, 3 + store volatile i32 %573, i32* @P3_is_marked, align 4, !tbaa !5 + br label %574 + +574: ; preds = %553, %557, %549, %545, %542, %539 + %575 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %576 = icmp sgt i32 %575, 3 + br i1 %576, label %577, label %609 + +577: ; preds = %574 + %578 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %579 = icmp slt i32 %578, 4 + br i1 %579, label %580, label %609 + +580: ; preds = %577 + %581 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %582 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %583 = icmp eq i64 %581, %582 + br i1 %583, label %584, label %609 + +584: ; preds = %580 + %585 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %586 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %587 = icmp eq i64 %585, %586 + br i1 %587, label %588, label %609 + +588: ; preds = %584 + %589 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %590 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %591 = icmp sgt i64 %590, %589 + br i1 %591, label %592, label %609 + +592: ; preds = %588 + %593 = load volatile 
i32, i32* @P2_is_marked, align 4, !tbaa !5 + %594 = add nsw i32 %593, -4 + store volatile i32 %594, i32* @P2_is_marked, align 4, !tbaa !5 + %595 = add nsw i64 %590, %589 + %596 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %597 = sext i32 %596 to i64 + %598 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %597 + store volatile i64 %589, i64* %598, align 8, !tbaa !9 + %599 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %600 = add nsw i32 %599, 1 + %601 = sext i32 %600 to i64 + %602 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %601 + store volatile i64 %590, i64* %602, align 8, !tbaa !9 + %603 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %604 = add nsw i32 %603, 2 + %605 = sext i32 %604 to i64 + %606 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %605 + store volatile i64 %595, i64* %606, align 8, !tbaa !9 + %607 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %608 = add nsw i32 %607, 3 + store volatile i32 %608, i32* @P3_is_marked, align 4, !tbaa !5 + br label %609 + +609: ; preds = %588, %592, %584, %580, %577, %574 + %610 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %611 = icmp sgt i32 %610, 3 + br i1 %611, label %612, label %644 + +612: ; preds = %609 + %613 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %614 = icmp slt i32 %613, 4 + br i1 %614, label %615, label %644 + +615: ; preds = %612 + %616 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %617 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %618 = icmp eq i64 %616, %617 + br i1 %618, label %619, label %644 + +619: ; preds = %615 + %620 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %621 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %622 = icmp eq i64 %620, %621 + br i1 %622, label %623, label %644 + +623: ; preds = %619 + %624 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %625 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %626 = icmp sgt i64 %625, %624 + br i1 %626, label %627, label %644 + +627: ; preds = %623 + %628 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %629 = add nsw i32 %628, -4 + store volatile i32 %629, i32* @P2_is_marked, align 4, !tbaa !5 + %630 = add nsw i64 %625, %624 + %631 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %632 = sext i32 %631 to i64 + %633 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %632 + store volatile i64 %624, i64* %633, align 8, !tbaa !9 + %634 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %635 = add nsw i32 %634, 1 + %636 = sext i32 %635 to i64 + %637 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %636 + store volatile i64 %625, i64* %637, align 8, !tbaa !9 + %638 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %639 = add nsw i32 %638, 2 + %640 = sext i32 %639 to i64 + %641 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %640 + store volatile i64 %630, i64* %641, 
align 8, !tbaa !9 + %642 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %643 = add nsw i32 %642, 3 + store volatile i32 %643, i32* @P3_is_marked, align 4, !tbaa !5 + br label %644 + +644: ; preds = %623, %627, %619, %615, %612, %609 + %645 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %646 = icmp sgt i32 %645, 3 + br i1 %646, label %647, label %679 + +647: ; preds = %644 + %648 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %649 = icmp slt i32 %648, 4 + br i1 %649, label %650, label %679 + +650: ; preds = %647 + %651 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %652 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %653 = icmp eq i64 %651, %652 + br i1 %653, label %654, label %679 + +654: ; preds = %650 + %655 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %656 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %657 = icmp eq i64 %655, %656 + br i1 %657, label %658, label %679 + +658: ; preds = %654 + %659 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %660 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %661 = icmp sgt i64 %660, %659 + br i1 %661, label %662, label %679 + +662: ; preds = %658 + %663 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %664 = add nsw i32 %663, -4 + store volatile i32 %664, i32* @P2_is_marked, align 4, !tbaa !5 + %665 = add nsw i64 %660, %659 + %666 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %667 = sext i32 %666 to i64 + %668 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %667 + store volatile i64 %659, i64* %668, align 8, !tbaa !9 + %669 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %670 = add nsw i32 %669, 1 + %671 = sext i32 %670 to i64 + %672 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %671 + store volatile i64 %660, i64* %672, align 8, !tbaa !9 + %673 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %674 = add nsw i32 %673, 2 + %675 = sext i32 %674 to i64 + %676 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %675 + store volatile i64 %665, i64* %676, align 8, !tbaa !9 + %677 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %678 = add nsw i32 %677, 3 + store volatile i32 %678, i32* @P3_is_marked, align 4, !tbaa !5 + br label %679 + +679: ; preds = %658, %662, %654, %650, %647, %644 + %680 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %681 = icmp sgt i32 %680, 3 + br i1 %681, label %682, label %714 + +682: ; preds = %679 + %683 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %684 = icmp slt i32 %683, 4 + br i1 %684, label %685, label %714 + +685: ; preds = %682 + %686 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %687 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %688 = icmp eq i64 %686, %687 + br i1 %688, label %689, label %714 + +689: ; preds = %685 + %690 = load volatile 
i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %691 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %692 = icmp eq i64 %690, %691 + br i1 %692, label %693, label %714 + +693: ; preds = %689 + %694 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %695 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %696 = icmp sgt i64 %695, %694 + br i1 %696, label %697, label %714 + +697: ; preds = %693 + %698 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %699 = add nsw i32 %698, -4 + store volatile i32 %699, i32* @P2_is_marked, align 4, !tbaa !5 + %700 = add nsw i64 %695, %694 + %701 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %702 = sext i32 %701 to i64 + %703 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %702 + store volatile i64 %694, i64* %703, align 8, !tbaa !9 + %704 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %705 = add nsw i32 %704, 1 + %706 = sext i32 %705 to i64 + %707 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %706 + store volatile i64 %695, i64* %707, align 8, !tbaa !9 + %708 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %709 = add nsw i32 %708, 2 + %710 = sext i32 %709 to i64 + %711 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %710 + store volatile i64 %700, i64* %711, align 8, !tbaa !9 + %712 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %713 = add nsw i32 %712, 3 + store volatile i32 %713, i32* @P3_is_marked, align 4, !tbaa !5 + br label %714 + +714: ; preds = %693, %697, %689, %685, %682, %679 + %715 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %716 = icmp sgt i32 %715, 3 + br i1 %716, label %717, label %749 + +717: ; preds = %714 + %718 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %719 = icmp slt i32 %718, 4 + br i1 %719, label %720, label %749 + +720: ; preds = %717 + %721 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %722 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %723 = icmp eq i64 %721, %722 + br i1 %723, label %724, label %749 + +724: ; preds = %720 + %725 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %726 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %727 = icmp eq i64 %725, %726 + br i1 %727, label %728, label %749 + +728: ; preds = %724 + %729 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %730 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %731 = icmp sgt i64 %730, %729 + br i1 %731, label %732, label %749 + +732: ; preds = %728 + %733 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %734 = add nsw i32 %733, -4 + store volatile i32 %734, i32* @P2_is_marked, align 4, !tbaa !5 + %735 = add nsw i64 %730, %729 + %736 = load volatile i32, i32* @P3_is_marked, align 
4, !tbaa !5 + %737 = sext i32 %736 to i64 + %738 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %737 + store volatile i64 %729, i64* %738, align 8, !tbaa !9 + %739 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %740 = add nsw i32 %739, 1 + %741 = sext i32 %740 to i64 + %742 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %741 + store volatile i64 %730, i64* %742, align 8, !tbaa !9 + %743 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %744 = add nsw i32 %743, 2 + %745 = sext i32 %744 to i64 + %746 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %745 + store volatile i64 %735, i64* %746, align 8, !tbaa !9 + %747 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %748 = add nsw i32 %747, 3 + store volatile i32 %748, i32* @P3_is_marked, align 4, !tbaa !5 + br label %749 + +749: ; preds = %728, %732, %724, %720, %717, %714 + %750 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %751 = icmp sgt i32 %750, 3 + br i1 %751, label %752, label %784 + +752: ; preds = %749 + %753 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %754 = icmp slt i32 %753, 4 + br i1 %754, label %755, label %784 + +755: ; preds = %752 + %756 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %757 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %758 = icmp eq i64 %756, %757 + br i1 %758, label %759, label %784 + +759: ; preds = %755 + %760 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %761 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %762 = icmp eq i64 %760, %761 + br i1 %762, label %763, label %784 + +763: ; preds = %759 + %764 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %765 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %766 = icmp sgt i64 %765, %764 + br i1 %766, label %767, label %784 + +767: ; preds = %763 + %768 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %769 = add nsw i32 %768, -4 + store volatile i32 %769, i32* @P2_is_marked, align 4, !tbaa !5 + %770 = add nsw i64 %765, %764 + %771 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %772 = sext i32 %771 to i64 + %773 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %772 + store volatile i64 %764, i64* %773, align 8, !tbaa !9 + %774 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %775 = add nsw i32 %774, 1 + %776 = sext i32 %775 to i64 + %777 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %776 + store volatile i64 %765, i64* %777, align 8, !tbaa !9 + %778 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %779 = add nsw i32 %778, 2 + %780 = sext i32 %779 to i64 + %781 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %780 + store volatile i64 %770, i64* %781, align 8, !tbaa !9 + %782 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %783 = add nsw i32 %782, 3 + store volatile i32 %783, i32* @P3_is_marked, align 4, !tbaa !5 + br label %784 + +784: ; preds = %763, %767, %759, 
%755, %752, %749 + %785 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %786 = icmp sgt i32 %785, 3 + br i1 %786, label %787, label %819 + +787: ; preds = %784 + %788 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %789 = icmp slt i32 %788, 4 + br i1 %789, label %790, label %819 + +790: ; preds = %787 + %791 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %792 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %793 = icmp eq i64 %791, %792 + br i1 %793, label %794, label %819 + +794: ; preds = %790 + %795 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %796 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %797 = icmp eq i64 %795, %796 + br i1 %797, label %798, label %819 + +798: ; preds = %794 + %799 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %800 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %801 = icmp sgt i64 %800, %799 + br i1 %801, label %802, label %819 + +802: ; preds = %798 + %803 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %804 = add nsw i32 %803, -4 + store volatile i32 %804, i32* @P2_is_marked, align 4, !tbaa !5 + %805 = add nsw i64 %800, %799 + %806 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %807 = sext i32 %806 to i64 + %808 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %807 + store volatile i64 %799, i64* %808, align 8, !tbaa !9 + %809 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %810 = add nsw i32 %809, 1 + %811 = sext i32 %810 to i64 + %812 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %811 + store volatile i64 %800, i64* %812, align 8, !tbaa !9 + %813 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %814 = add nsw i32 %813, 2 + %815 = sext i32 %814 to i64 + %816 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %815 + store volatile i64 %805, i64* %816, align 8, !tbaa !9 + %817 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %818 = add nsw i32 %817, 3 + store volatile i32 %818, i32* @P3_is_marked, align 4, !tbaa !5 + br label %819 + +819: ; preds = %798, %802, %794, %790, %787, %784 + %820 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %821 = icmp sgt i32 %820, 3 + br i1 %821, label %822, label %854 + +822: ; preds = %819 + %823 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %824 = icmp slt i32 %823, 4 + br i1 %824, label %825, label %854 + +825: ; preds = %822 + %826 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %827 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %828 = icmp eq i64 %826, %827 + br i1 %828, label %829, label %854 + +829: ; preds = %825 + %830 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %831 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 
16, !tbaa !9 + %832 = icmp eq i64 %830, %831 + br i1 %832, label %833, label %854 + +833: ; preds = %829 + %834 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %835 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %836 = icmp sgt i64 %835, %834 + br i1 %836, label %837, label %854 + +837: ; preds = %833 + %838 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %839 = add nsw i32 %838, -4 + store volatile i32 %839, i32* @P2_is_marked, align 4, !tbaa !5 + %840 = add nsw i64 %835, %834 + %841 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %842 = sext i32 %841 to i64 + %843 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %842 + store volatile i64 %834, i64* %843, align 8, !tbaa !9 + %844 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %845 = add nsw i32 %844, 1 + %846 = sext i32 %845 to i64 + %847 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %846 + store volatile i64 %835, i64* %847, align 8, !tbaa !9 + %848 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %849 = add nsw i32 %848, 2 + %850 = sext i32 %849 to i64 + %851 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %850 + store volatile i64 %840, i64* %851, align 8, !tbaa !9 + %852 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %853 = add nsw i32 %852, 3 + store volatile i32 %853, i32* @P3_is_marked, align 4, !tbaa !5 + br label %854 + +854: ; preds = %833, %837, %829, %825, %822, %819 + %855 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %856 = icmp sgt i32 %855, 3 + br i1 %856, label %857, label %889 + +857: ; preds = %854 + %858 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %859 = icmp slt i32 %858, 4 + br i1 %859, label %860, label %889 + +860: ; preds = %857 + %861 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %862 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %863 = icmp eq i64 %861, %862 + br i1 %863, label %864, label %889 + +864: ; preds = %860 + %865 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %866 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %867 = icmp eq i64 %865, %866 + br i1 %867, label %868, label %889 + +868: ; preds = %864 + %869 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %870 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %871 = icmp sgt i64 %870, %869 + br i1 %871, label %872, label %889 + +872: ; preds = %868 + %873 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %874 = add nsw i32 %873, -4 + store volatile i32 %874, i32* @P2_is_marked, align 4, !tbaa !5 + %875 = add nsw i64 %870, %869 + %876 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %877 = sext i32 %876 to i64 + %878 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %877 + store volatile i64 %869, i64* %878, align 8, !tbaa !9 + %879 = load volatile i32, i32* 
@P3_is_marked, align 4, !tbaa !5 + %880 = add nsw i32 %879, 1 + %881 = sext i32 %880 to i64 + %882 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %881 + store volatile i64 %870, i64* %882, align 8, !tbaa !9 + %883 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %884 = add nsw i32 %883, 2 + %885 = sext i32 %884 to i64 + %886 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %885 + store volatile i64 %875, i64* %886, align 8, !tbaa !9 + %887 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %888 = add nsw i32 %887, 3 + store volatile i32 %888, i32* @P3_is_marked, align 4, !tbaa !5 + br label %889 + +889: ; preds = %868, %872, %864, %860, %857, %854 + %890 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %891 = icmp sgt i32 %890, 3 + br i1 %891, label %892, label %924 + +892: ; preds = %889 + %893 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %894 = icmp slt i32 %893, 4 + br i1 %894, label %895, label %924 + +895: ; preds = %892 + %896 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %897 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %898 = icmp eq i64 %896, %897 + br i1 %898, label %899, label %924 + +899: ; preds = %895 + %900 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %901 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %902 = icmp eq i64 %900, %901 + br i1 %902, label %903, label %924 + +903: ; preds = %899 + %904 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %905 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %906 = icmp sgt i64 %905, %904 + br i1 %906, label %907, label %924 + +907: ; preds = %903 + %908 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %909 = add nsw i32 %908, -4 + store volatile i32 %909, i32* @P2_is_marked, align 4, !tbaa !5 + %910 = add nsw i64 %905, %904 + %911 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %912 = sext i32 %911 to i64 + %913 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %912 + store volatile i64 %904, i64* %913, align 8, !tbaa !9 + %914 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %915 = add nsw i32 %914, 1 + %916 = sext i32 %915 to i64 + %917 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %916 + store volatile i64 %905, i64* %917, align 8, !tbaa !9 + %918 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %919 = add nsw i32 %918, 2 + %920 = sext i32 %919 to i64 + %921 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %920 + store volatile i64 %910, i64* %921, align 8, !tbaa !9 + %922 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %923 = add nsw i32 %922, 3 + store volatile i32 %923, i32* @P3_is_marked, align 4, !tbaa !5 + br label %924 + +924: ; preds = %903, %907, %899, %895, %892, %889 + %925 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %926 = icmp sgt i32 %925, 3 + br i1 %926, label %927, label %959 + +927: ; preds = %924 + %928 = load volatile i32, i32* @P3_is_marked, 
align 4, !tbaa !5 + %929 = icmp slt i32 %928, 4 + br i1 %929, label %930, label %959 + +930: ; preds = %927 + %931 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %932 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %933 = icmp eq i64 %931, %932 + br i1 %933, label %934, label %959 + +934: ; preds = %930 + %935 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %936 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %937 = icmp eq i64 %935, %936 + br i1 %937, label %938, label %959 + +938: ; preds = %934 + %939 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %940 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %941 = icmp sgt i64 %940, %939 + br i1 %941, label %942, label %959 + +942: ; preds = %938 + %943 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %944 = add nsw i32 %943, -4 + store volatile i32 %944, i32* @P2_is_marked, align 4, !tbaa !5 + %945 = add nsw i64 %940, %939 + %946 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %947 = sext i32 %946 to i64 + %948 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %947 + store volatile i64 %939, i64* %948, align 8, !tbaa !9 + %949 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %950 = add nsw i32 %949, 1 + %951 = sext i32 %950 to i64 + %952 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %951 + store volatile i64 %940, i64* %952, align 8, !tbaa !9 + %953 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %954 = add nsw i32 %953, 2 + %955 = sext i32 %954 to i64 + %956 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %955 + store volatile i64 %945, i64* %956, align 8, !tbaa !9 + %957 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %958 = add nsw i32 %957, 3 + store volatile i32 %958, i32* @P3_is_marked, align 4, !tbaa !5 + br label %959 + +959: ; preds = %938, %942, %934, %930, %927, %924 + %960 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %961 = icmp sgt i32 %960, 3 + br i1 %961, label %962, label %994 + +962: ; preds = %959 + %963 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %964 = icmp slt i32 %963, 4 + br i1 %964, label %965, label %994 + +965: ; preds = %962 + %966 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %967 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %968 = icmp eq i64 %966, %967 + br i1 %968, label %969, label %994 + +969: ; preds = %965 + %970 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %971 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %972 = icmp eq i64 %970, %971 + br i1 %972, label %973, label %994 + +973: ; preds = %969 + %974 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), 
align 8, !tbaa !9 + %975 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %976 = icmp sgt i64 %975, %974 + br i1 %976, label %977, label %994 + +977: ; preds = %973 + %978 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %979 = add nsw i32 %978, -4 + store volatile i32 %979, i32* @P2_is_marked, align 4, !tbaa !5 + %980 = add nsw i64 %975, %974 + %981 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %982 = sext i32 %981 to i64 + %983 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %982 + store volatile i64 %974, i64* %983, align 8, !tbaa !9 + %984 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %985 = add nsw i32 %984, 1 + %986 = sext i32 %985 to i64 + %987 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %986 + store volatile i64 %975, i64* %987, align 8, !tbaa !9 + %988 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %989 = add nsw i32 %988, 2 + %990 = sext i32 %989 to i64 + %991 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %990 + store volatile i64 %980, i64* %991, align 8, !tbaa !9 + %992 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %993 = add nsw i32 %992, 3 + store volatile i32 %993, i32* @P3_is_marked, align 4, !tbaa !5 + br label %994 + +994: ; preds = %973, %977, %969, %965, %962, %959 + %995 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %996 = icmp sgt i32 %995, 3 + br i1 %996, label %997, label %1029 + +997: ; preds = %994 + %998 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %999 = icmp slt i32 %998, 4 + br i1 %999, label %1000, label %1029 + +1000: ; preds = %997 + %1001 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1002 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1003 = icmp eq i64 %1001, %1002 + br i1 %1003, label %1004, label %1029 + +1004: ; preds = %1000 + %1005 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1006 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1007 = icmp eq i64 %1005, %1006 + br i1 %1007, label %1008, label %1029 + +1008: ; preds = %1004 + %1009 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %1010 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1011 = icmp sgt i64 %1010, %1009 + br i1 %1011, label %1012, label %1029 + +1012: ; preds = %1008 + %1013 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1014 = add nsw i32 %1013, -4 + store volatile i32 %1014, i32* @P2_is_marked, align 4, !tbaa !5 + %1015 = add nsw i64 %1010, %1009 + %1016 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1017 = sext i32 %1016 to i64 + %1018 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1017 + store volatile i64 %1009, i64* %1018, align 8, !tbaa !9 + %1019 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1020 = add nsw i32 %1019, 1 + %1021 = sext i32 %1020 to i64 + %1022 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, 
i64 %1021 + store volatile i64 %1010, i64* %1022, align 8, !tbaa !9 + %1023 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1024 = add nsw i32 %1023, 2 + %1025 = sext i32 %1024 to i64 + %1026 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1025 + store volatile i64 %1015, i64* %1026, align 8, !tbaa !9 + %1027 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1028 = add nsw i32 %1027, 3 + store volatile i32 %1028, i32* @P3_is_marked, align 4, !tbaa !5 + br label %1029 + +1029: ; preds = %1008, %1012, %1004, %1000, %997, %994 + %1030 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1031 = icmp sgt i32 %1030, 4 + br i1 %1031, label %1032, label %1065 + +1032: ; preds = %1029 + %1033 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1034 = icmp slt i32 %1033, 4 + br i1 %1034, label %1035, label %1065 + +1035: ; preds = %1032 + %1036 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1037 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1038 = icmp eq i64 %1036, %1037 + br i1 %1038, label %1039, label %1065 + +1039: ; preds = %1035 + %1040 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1041 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1042 = icmp eq i64 %1040, %1041 + br i1 %1042, label %1043, label %1065 + +1043: ; preds = %1039 + %1044 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1045 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1046 = icmp sgt i64 %1045, %1044 + br i1 %1046, label %1047, label %1065 + +1047: ; preds = %1043 + %1048 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + store volatile i64 %1048, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1049 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1050 = add nsw i32 %1049, -4 + store volatile i32 %1050, i32* @P2_is_marked, align 4, !tbaa !5 + %1051 = add nsw i64 %1045, %1044 + %1052 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1053 = sext i32 %1052 to i64 + %1054 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1053 + store volatile i64 %1044, i64* %1054, align 8, !tbaa !9 + %1055 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1056 = add nsw i32 %1055, 1 + %1057 = sext i32 %1056 to i64 + %1058 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1057 + store volatile i64 %1045, i64* %1058, align 8, !tbaa !9 + %1059 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1060 = add nsw i32 %1059, 2 + %1061 = sext i32 %1060 to i64 + %1062 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1061 + store volatile i64 %1051, i64* %1062, align 8, !tbaa !9 + %1063 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1064 = add nsw i32 %1063, 3 + store volatile i32 %1064, i32* @P3_is_marked, align 4, !tbaa !5 + br label %1065 + +1065: ; preds = %1043, %1047, %1039, %1035, %1032, 
%1029 + %1066 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1067 = icmp sgt i32 %1066, 4 + br i1 %1067, label %1068, label %1101 + +1068: ; preds = %1065 + %1069 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1070 = icmp slt i32 %1069, 4 + br i1 %1070, label %1071, label %1101 + +1071: ; preds = %1068 + %1072 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1073 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %1074 = icmp eq i64 %1072, %1073 + br i1 %1074, label %1075, label %1101 + +1075: ; preds = %1071 + %1076 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1077 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1078 = icmp eq i64 %1076, %1077 + br i1 %1078, label %1079, label %1101 + +1079: ; preds = %1075 + %1080 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1081 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1082 = icmp sgt i64 %1081, %1080 + br i1 %1082, label %1083, label %1101 + +1083: ; preds = %1079 + %1084 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + store volatile i64 %1084, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1085 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1086 = add nsw i32 %1085, -4 + store volatile i32 %1086, i32* @P2_is_marked, align 4, !tbaa !5 + %1087 = add nsw i64 %1081, %1080 + %1088 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1089 = sext i32 %1088 to i64 + %1090 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1089 + store volatile i64 %1080, i64* %1090, align 8, !tbaa !9 + %1091 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1092 = add nsw i32 %1091, 1 + %1093 = sext i32 %1092 to i64 + %1094 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1093 + store volatile i64 %1081, i64* %1094, align 8, !tbaa !9 + %1095 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1096 = add nsw i32 %1095, 2 + %1097 = sext i32 %1096 to i64 + %1098 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1097 + store volatile i64 %1087, i64* %1098, align 8, !tbaa !9 + %1099 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1100 = add nsw i32 %1099, 3 + store volatile i32 %1100, i32* @P3_is_marked, align 4, !tbaa !5 + br label %1101 + +1101: ; preds = %1079, %1083, %1075, %1071, %1068, %1065 + %1102 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1103 = icmp sgt i32 %1102, 4 + br i1 %1103, label %1104, label %1137 + +1104: ; preds = %1101 + %1105 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1106 = icmp slt i32 %1105, 4 + br i1 %1106, label %1107, label %1137 + +1107: ; preds = %1104 + %1108 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1109 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), 
align 16, !tbaa !9 + %1110 = icmp eq i64 %1108, %1109 + br i1 %1110, label %1111, label %1137 + +1111: ; preds = %1107 + %1112 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1113 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1114 = icmp eq i64 %1112, %1113 + br i1 %1114, label %1115, label %1137 + +1115: ; preds = %1111 + %1116 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1117 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1118 = icmp sgt i64 %1117, %1116 + br i1 %1118, label %1119, label %1137 + +1119: ; preds = %1115 + %1120 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + store volatile i64 %1120, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1121 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1122 = add nsw i32 %1121, -4 + store volatile i32 %1122, i32* @P2_is_marked, align 4, !tbaa !5 + %1123 = add nsw i64 %1117, %1116 + %1124 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1125 = sext i32 %1124 to i64 + %1126 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1125 + store volatile i64 %1116, i64* %1126, align 8, !tbaa !9 + %1127 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1128 = add nsw i32 %1127, 1 + %1129 = sext i32 %1128 to i64 + %1130 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1129 + store volatile i64 %1117, i64* %1130, align 8, !tbaa !9 + %1131 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1132 = add nsw i32 %1131, 2 + %1133 = sext i32 %1132 to i64 + %1134 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1133 + store volatile i64 %1123, i64* %1134, align 8, !tbaa !9 + %1135 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1136 = add nsw i32 %1135, 3 + store volatile i32 %1136, i32* @P3_is_marked, align 4, !tbaa !5 + br label %1137 + +1137: ; preds = %1115, %1119, %1111, %1107, %1104, %1101 + %1138 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1139 = icmp sgt i32 %1138, 4 + br i1 %1139, label %1140, label %1173 + +1140: ; preds = %1137 + %1141 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1142 = icmp slt i32 %1141, 4 + br i1 %1142, label %1143, label %1173 + +1143: ; preds = %1140 + %1144 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1145 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1146 = icmp eq i64 %1144, %1145 + br i1 %1146, label %1147, label %1173 + +1147: ; preds = %1143 + %1148 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1149 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %1150 = icmp eq i64 %1148, %1149 + br i1 %1150, label %1151, label %1173 + +1151: ; preds = %1147 + %1152 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* 
@P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1153 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1154 = icmp sgt i64 %1153, %1152 + br i1 %1154, label %1155, label %1173 + +1155: ; preds = %1151 + %1156 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + store volatile i64 %1156, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1157 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1158 = add nsw i32 %1157, -4 + store volatile i32 %1158, i32* @P2_is_marked, align 4, !tbaa !5 + %1159 = add nsw i64 %1153, %1152 + %1160 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1161 = sext i32 %1160 to i64 + %1162 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1161 + store volatile i64 %1152, i64* %1162, align 8, !tbaa !9 + %1163 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1164 = add nsw i32 %1163, 1 + %1165 = sext i32 %1164 to i64 + %1166 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1165 + store volatile i64 %1153, i64* %1166, align 8, !tbaa !9 + %1167 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1168 = add nsw i32 %1167, 2 + %1169 = sext i32 %1168 to i64 + %1170 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1169 + store volatile i64 %1159, i64* %1170, align 8, !tbaa !9 + %1171 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1172 = add nsw i32 %1171, 3 + store volatile i32 %1172, i32* @P3_is_marked, align 4, !tbaa !5 + br label %1173 + +1173: ; preds = %1151, %1155, %1147, %1143, %1140, %1137 + %1174 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1175 = icmp sgt i32 %1174, 4 + br i1 %1175, label %1176, label %1209 + +1176: ; preds = %1173 + %1177 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1178 = icmp slt i32 %1177, 4 + br i1 %1178, label %1179, label %1209 + +1179: ; preds = %1176 + %1180 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1181 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1182 = icmp eq i64 %1180, %1181 + br i1 %1182, label %1183, label %1209 + +1183: ; preds = %1179 + %1184 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1185 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1186 = icmp eq i64 %1184, %1185 + br i1 %1186, label %1187, label %1209 + +1187: ; preds = %1183 + %1188 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1189 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1190 = icmp sgt i64 %1189, %1188 + br i1 %1190, label %1191, label %1209 + +1191: ; preds = %1187 + %1192 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + store volatile i64 %1192, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1193 = load 
volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1194 = add nsw i32 %1193, -4 + store volatile i32 %1194, i32* @P2_is_marked, align 4, !tbaa !5 + %1195 = add nsw i64 %1189, %1188 + %1196 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1197 = sext i32 %1196 to i64 + %1198 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1197 + store volatile i64 %1188, i64* %1198, align 8, !tbaa !9 + %1199 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1200 = add nsw i32 %1199, 1 + %1201 = sext i32 %1200 to i64 + %1202 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1201 + store volatile i64 %1189, i64* %1202, align 8, !tbaa !9 + %1203 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1204 = add nsw i32 %1203, 2 + %1205 = sext i32 %1204 to i64 + %1206 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1205 + store volatile i64 %1195, i64* %1206, align 8, !tbaa !9 + %1207 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1208 = add nsw i32 %1207, 3 + store volatile i32 %1208, i32* @P3_is_marked, align 4, !tbaa !5 + br label %1209 + +1209: ; preds = %1187, %1191, %1183, %1179, %1176, %1173 + %1210 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1211 = icmp sgt i32 %1210, 4 + br i1 %1211, label %1212, label %1245 + +1212: ; preds = %1209 + %1213 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1214 = icmp slt i32 %1213, 4 + br i1 %1214, label %1215, label %1245 + +1215: ; preds = %1212 + %1216 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1217 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %1218 = icmp eq i64 %1216, %1217 + br i1 %1218, label %1219, label %1245 + +1219: ; preds = %1215 + %1220 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1221 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1222 = icmp eq i64 %1220, %1221 + br i1 %1222, label %1223, label %1245 + +1223: ; preds = %1219 + %1224 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1225 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1226 = icmp sgt i64 %1225, %1224 + br i1 %1226, label %1227, label %1245 + +1227: ; preds = %1223 + %1228 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + store volatile i64 %1228, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1229 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1230 = add nsw i32 %1229, -4 + store volatile i32 %1230, i32* @P2_is_marked, align 4, !tbaa !5 + %1231 = add nsw i64 %1225, %1224 + %1232 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1233 = sext i32 %1232 to i64 + %1234 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1233 + store volatile i64 %1224, i64* %1234, align 8, !tbaa !9 + %1235 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1236 = add nsw i32 %1235, 1 + %1237 = sext i32 %1236 to i64 + %1238 = 
getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1237 + store volatile i64 %1225, i64* %1238, align 8, !tbaa !9 + %1239 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1240 = add nsw i32 %1239, 2 + %1241 = sext i32 %1240 to i64 + %1242 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1241 + store volatile i64 %1231, i64* %1242, align 8, !tbaa !9 + %1243 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1244 = add nsw i32 %1243, 3 + store volatile i32 %1244, i32* @P3_is_marked, align 4, !tbaa !5 + br label %1245 + +1245: ; preds = %1223, %1227, %1219, %1215, %1212, %1209 + %1246 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1247 = icmp sgt i32 %1246, 4 + br i1 %1247, label %1248, label %1281 + +1248: ; preds = %1245 + %1249 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1250 = icmp slt i32 %1249, 4 + br i1 %1250, label %1251, label %1281 + +1251: ; preds = %1248 + %1252 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1253 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1254 = icmp eq i64 %1252, %1253 + br i1 %1254, label %1255, label %1281 + +1255: ; preds = %1251 + %1256 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1257 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1258 = icmp eq i64 %1256, %1257 + br i1 %1258, label %1259, label %1281 + +1259: ; preds = %1255 + %1260 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1261 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1262 = icmp sgt i64 %1261, %1260 + br i1 %1262, label %1263, label %1281 + +1263: ; preds = %1259 + %1264 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + store volatile i64 %1264, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1265 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1266 = add nsw i32 %1265, -4 + store volatile i32 %1266, i32* @P2_is_marked, align 4, !tbaa !5 + %1267 = add nsw i64 %1261, %1260 + %1268 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1269 = sext i32 %1268 to i64 + %1270 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1269 + store volatile i64 %1260, i64* %1270, align 8, !tbaa !9 + %1271 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1272 = add nsw i32 %1271, 1 + %1273 = sext i32 %1272 to i64 + %1274 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1273 + store volatile i64 %1261, i64* %1274, align 8, !tbaa !9 + %1275 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1276 = add nsw i32 %1275, 2 + %1277 = sext i32 %1276 to i64 + %1278 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1277 + store volatile i64 %1267, i64* %1278, align 8, !tbaa !9 + %1279 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1280 = add nsw i32 %1279, 3 + store volatile i32 %1280, i32* @P3_is_marked, align 4, !tbaa 
!5 + br label %1281 + +1281: ; preds = %1259, %1263, %1255, %1251, %1248, %1245 + %1282 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1283 = icmp sgt i32 %1282, 4 + br i1 %1283, label %1284, label %1317 + +1284: ; preds = %1281 + %1285 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1286 = icmp slt i32 %1285, 4 + br i1 %1286, label %1287, label %1317 + +1287: ; preds = %1284 + %1288 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1289 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1290 = icmp eq i64 %1288, %1289 + br i1 %1290, label %1291, label %1317 + +1291: ; preds = %1287 + %1292 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1293 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %1294 = icmp eq i64 %1292, %1293 + br i1 %1294, label %1295, label %1317 + +1295: ; preds = %1291 + %1296 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1297 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1298 = icmp sgt i64 %1297, %1296 + br i1 %1298, label %1299, label %1317 + +1299: ; preds = %1295 + %1300 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + store volatile i64 %1300, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1301 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1302 = add nsw i32 %1301, -4 + store volatile i32 %1302, i32* @P2_is_marked, align 4, !tbaa !5 + %1303 = add nsw i64 %1297, %1296 + %1304 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1305 = sext i32 %1304 to i64 + %1306 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1305 + store volatile i64 %1296, i64* %1306, align 8, !tbaa !9 + %1307 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1308 = add nsw i32 %1307, 1 + %1309 = sext i32 %1308 to i64 + %1310 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1309 + store volatile i64 %1297, i64* %1310, align 8, !tbaa !9 + %1311 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1312 = add nsw i32 %1311, 2 + %1313 = sext i32 %1312 to i64 + %1314 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1313 + store volatile i64 %1303, i64* %1314, align 8, !tbaa !9 + %1315 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1316 = add nsw i32 %1315, 3 + store volatile i32 %1316, i32* @P3_is_marked, align 4, !tbaa !5 + br label %1317 + +1317: ; preds = %1295, %1299, %1291, %1287, %1284, %1281 + %1318 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1319 = icmp sgt i32 %1318, 4 + br i1 %1319, label %1320, label %1353 + +1320: ; preds = %1317 + %1321 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1322 = icmp slt i32 %1321, 4 + br i1 %1322, label %1323, label %1353 + +1323: ; preds = %1320 + %1324 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %1325 = load volatile i64, i64* 
getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1326 = icmp eq i64 %1324, %1325 + br i1 %1326, label %1327, label %1353 + +1327: ; preds = %1323 + %1328 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %1329 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1330 = icmp eq i64 %1328, %1329 + br i1 %1330, label %1331, label %1353 + +1331: ; preds = %1327 + %1332 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1333 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %1334 = icmp sgt i64 %1333, %1332 + br i1 %1334, label %1335, label %1353 + +1335: ; preds = %1331 + %1336 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + store volatile i64 %1336, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1337 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1338 = add nsw i32 %1337, -4 + store volatile i32 %1338, i32* @P2_is_marked, align 4, !tbaa !5 + %1339 = add nsw i64 %1333, %1332 + %1340 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1341 = sext i32 %1340 to i64 + %1342 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1341 + store volatile i64 %1332, i64* %1342, align 8, !tbaa !9 + %1343 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1344 = add nsw i32 %1343, 1 + %1345 = sext i32 %1344 to i64 + %1346 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1345 + store volatile i64 %1333, i64* %1346, align 8, !tbaa !9 + %1347 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1348 = add nsw i32 %1347, 2 + %1349 = sext i32 %1348 to i64 + %1350 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1349 + store volatile i64 %1339, i64* %1350, align 8, !tbaa !9 + %1351 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1352 = add nsw i32 %1351, 3 + store volatile i32 %1352, i32* @P3_is_marked, align 4, !tbaa !5 + br label %1353 + +1353: ; preds = %1331, %1335, %1327, %1323, %1320, %1317 + %1354 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1355 = icmp sgt i32 %1354, 4 + br i1 %1355, label %1356, label %1389 + +1356: ; preds = %1353 + %1357 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1358 = icmp slt i32 %1357, 4 + br i1 %1358, label %1359, label %1389 + +1359: ; preds = %1356 + %1360 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %1361 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1362 = icmp eq i64 %1360, %1361 + br i1 %1362, label %1363, label %1389 + +1363: ; preds = %1359 + %1364 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %1365 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1366 = icmp eq i64 %1364, %1365 + br i1 %1366, label %1367, label %1389 + +1367: ; preds = %1363 + %1368 = 
load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1369 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %1370 = icmp sgt i64 %1369, %1368 + br i1 %1370, label %1371, label %1389 + +1371: ; preds = %1367 + %1372 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + store volatile i64 %1372, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1373 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1374 = add nsw i32 %1373, -4 + store volatile i32 %1374, i32* @P2_is_marked, align 4, !tbaa !5 + %1375 = add nsw i64 %1369, %1368 + %1376 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1377 = sext i32 %1376 to i64 + %1378 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1377 + store volatile i64 %1368, i64* %1378, align 8, !tbaa !9 + %1379 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1380 = add nsw i32 %1379, 1 + %1381 = sext i32 %1380 to i64 + %1382 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1381 + store volatile i64 %1369, i64* %1382, align 8, !tbaa !9 + %1383 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1384 = add nsw i32 %1383, 2 + %1385 = sext i32 %1384 to i64 + %1386 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1385 + store volatile i64 %1375, i64* %1386, align 8, !tbaa !9 + %1387 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1388 = add nsw i32 %1387, 3 + store volatile i32 %1388, i32* @P3_is_marked, align 4, !tbaa !5 + br label %1389 + +1389: ; preds = %1367, %1371, %1363, %1359, %1356, %1353 + %1390 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1391 = icmp sgt i32 %1390, 4 + br i1 %1391, label %1392, label %1425 + +1392: ; preds = %1389 + %1393 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1394 = icmp slt i32 %1393, 4 + br i1 %1394, label %1395, label %1425 + +1395: ; preds = %1392 + %1396 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %1397 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1398 = icmp eq i64 %1396, %1397 + br i1 %1398, label %1399, label %1425 + +1399: ; preds = %1395 + %1400 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %1401 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1402 = icmp eq i64 %1400, %1401 + br i1 %1402, label %1403, label %1425 + +1403: ; preds = %1399 + %1404 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1405 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %1406 = icmp sgt i64 %1405, %1404 + br i1 %1406, label %1407, label %1425 + +1407: ; preds = %1403 + %1408 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + store volatile i64 %1408, i64* getelementptr inbounds ([5 x i64], [5 x i64]* 
@P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1409 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1410 = add nsw i32 %1409, -4 + store volatile i32 %1410, i32* @P2_is_marked, align 4, !tbaa !5 + %1411 = add nsw i64 %1405, %1404 + %1412 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1413 = sext i32 %1412 to i64 + %1414 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1413 + store volatile i64 %1404, i64* %1414, align 8, !tbaa !9 + %1415 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1416 = add nsw i32 %1415, 1 + %1417 = sext i32 %1416 to i64 + %1418 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1417 + store volatile i64 %1405, i64* %1418, align 8, !tbaa !9 + %1419 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1420 = add nsw i32 %1419, 2 + %1421 = sext i32 %1420 to i64 + %1422 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1421 + store volatile i64 %1411, i64* %1422, align 8, !tbaa !9 + %1423 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1424 = add nsw i32 %1423, 3 + store volatile i32 %1424, i32* @P3_is_marked, align 4, !tbaa !5 + br label %1425 + +1425: ; preds = %1403, %1407, %1399, %1395, %1392, %1389 + %1426 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1427 = icmp sgt i32 %1426, 4 + br i1 %1427, label %1428, label %1461 + +1428: ; preds = %1425 + %1429 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1430 = icmp slt i32 %1429, 4 + br i1 %1430, label %1431, label %1461 + +1431: ; preds = %1428 + %1432 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %1433 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1434 = icmp eq i64 %1432, %1433 + br i1 %1434, label %1435, label %1461 + +1435: ; preds = %1431 + %1436 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %1437 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1438 = icmp eq i64 %1436, %1437 + br i1 %1438, label %1439, label %1461 + +1439: ; preds = %1435 + %1440 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1441 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %1442 = icmp sgt i64 %1441, %1440 + br i1 %1442, label %1443, label %1461 + +1443: ; preds = %1439 + %1444 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + store volatile i64 %1444, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1445 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1446 = add nsw i32 %1445, -4 + store volatile i32 %1446, i32* @P2_is_marked, align 4, !tbaa !5 + %1447 = add nsw i64 %1441, %1440 + %1448 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1449 = sext i32 %1448 to i64 + %1450 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1449 + store volatile i64 %1440, i64* %1450, align 8, !tbaa !9 + %1451 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1452 = 
add nsw i32 %1451, 1 + %1453 = sext i32 %1452 to i64 + %1454 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1453 + store volatile i64 %1441, i64* %1454, align 8, !tbaa !9 + %1455 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1456 = add nsw i32 %1455, 2 + %1457 = sext i32 %1456 to i64 + %1458 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1457 + store volatile i64 %1447, i64* %1458, align 8, !tbaa !9 + %1459 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1460 = add nsw i32 %1459, 3 + store volatile i32 %1460, i32* @P3_is_marked, align 4, !tbaa !5 + br label %1461 + +1461: ; preds = %1439, %1443, %1435, %1431, %1428, %1425 + %1462 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1463 = icmp sgt i32 %1462, 4 + br i1 %1463, label %1464, label %1497 + +1464: ; preds = %1461 + %1465 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1466 = icmp slt i32 %1465, 4 + br i1 %1466, label %1467, label %1497 + +1467: ; preds = %1464 + %1468 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1469 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1470 = icmp eq i64 %1468, %1469 + br i1 %1470, label %1471, label %1497 + +1471: ; preds = %1467 + %1472 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1473 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1474 = icmp eq i64 %1472, %1473 + br i1 %1474, label %1475, label %1497 + +1475: ; preds = %1471 + %1476 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1477 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1478 = icmp sgt i64 %1477, %1476 + br i1 %1478, label %1479, label %1497 + +1479: ; preds = %1475 + %1480 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + store volatile i64 %1480, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1481 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1482 = add nsw i32 %1481, -4 + store volatile i32 %1482, i32* @P2_is_marked, align 4, !tbaa !5 + %1483 = add nsw i64 %1477, %1476 + %1484 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1485 = sext i32 %1484 to i64 + %1486 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1485 + store volatile i64 %1476, i64* %1486, align 8, !tbaa !9 + %1487 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1488 = add nsw i32 %1487, 1 + %1489 = sext i32 %1488 to i64 + %1490 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1489 + store volatile i64 %1477, i64* %1490, align 8, !tbaa !9 + %1491 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1492 = add nsw i32 %1491, 2 + %1493 = sext i32 %1492 to i64 + %1494 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1493 + store volatile i64 %1483, i64* %1494, align 8, !tbaa !9 + %1495 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1496 = add nsw i32 %1495, 3 
+ store volatile i32 %1496, i32* @P3_is_marked, align 4, !tbaa !5 + br label %1497 + +1497: ; preds = %1475, %1479, %1471, %1467, %1464, %1461 + %1498 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1499 = icmp sgt i32 %1498, 4 + br i1 %1499, label %1500, label %1533 + +1500: ; preds = %1497 + %1501 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1502 = icmp slt i32 %1501, 4 + br i1 %1502, label %1503, label %1533 + +1503: ; preds = %1500 + %1504 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1505 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1506 = icmp eq i64 %1504, %1505 + br i1 %1506, label %1507, label %1533 + +1507: ; preds = %1503 + %1508 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1509 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %1510 = icmp eq i64 %1508, %1509 + br i1 %1510, label %1511, label %1533 + +1511: ; preds = %1507 + %1512 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1513 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1514 = icmp sgt i64 %1513, %1512 + br i1 %1514, label %1515, label %1533 + +1515: ; preds = %1511 + %1516 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + store volatile i64 %1516, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1517 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1518 = add nsw i32 %1517, -4 + store volatile i32 %1518, i32* @P2_is_marked, align 4, !tbaa !5 + %1519 = add nsw i64 %1513, %1512 + %1520 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1521 = sext i32 %1520 to i64 + %1522 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1521 + store volatile i64 %1512, i64* %1522, align 8, !tbaa !9 + %1523 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1524 = add nsw i32 %1523, 1 + %1525 = sext i32 %1524 to i64 + %1526 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1525 + store volatile i64 %1513, i64* %1526, align 8, !tbaa !9 + %1527 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1528 = add nsw i32 %1527, 2 + %1529 = sext i32 %1528 to i64 + %1530 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1529 + store volatile i64 %1519, i64* %1530, align 8, !tbaa !9 + %1531 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1532 = add nsw i32 %1531, 3 + store volatile i32 %1532, i32* @P3_is_marked, align 4, !tbaa !5 + br label %1533 + +1533: ; preds = %1511, %1515, %1507, %1503, %1500, %1497 + %1534 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1535 = icmp sgt i32 %1534, 4 + br i1 %1535, label %1536, label %1569 + +1536: ; preds = %1533 + %1537 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1538 = icmp slt i32 %1537, 4 + br i1 %1538, label %1539, label %1569 + +1539: ; preds = %1536 + %1540 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 
4), align 16, !tbaa !9 + %1541 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1542 = icmp eq i64 %1540, %1541 + br i1 %1542, label %1543, label %1569 + +1543: ; preds = %1539 + %1544 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1545 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1546 = icmp eq i64 %1544, %1545 + br i1 %1546, label %1547, label %1569 + +1547: ; preds = %1543 + %1548 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1549 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1550 = icmp sgt i64 %1549, %1548 + br i1 %1550, label %1551, label %1569 + +1551: ; preds = %1547 + %1552 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + store volatile i64 %1552, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1553 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1554 = add nsw i32 %1553, -4 + store volatile i32 %1554, i32* @P2_is_marked, align 4, !tbaa !5 + %1555 = add nsw i64 %1549, %1548 + %1556 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1557 = sext i32 %1556 to i64 + %1558 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1557 + store volatile i64 %1548, i64* %1558, align 8, !tbaa !9 + %1559 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1560 = add nsw i32 %1559, 1 + %1561 = sext i32 %1560 to i64 + %1562 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1561 + store volatile i64 %1549, i64* %1562, align 8, !tbaa !9 + %1563 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1564 = add nsw i32 %1563, 2 + %1565 = sext i32 %1564 to i64 + %1566 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1565 + store volatile i64 %1555, i64* %1566, align 8, !tbaa !9 + %1567 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1568 = add nsw i32 %1567, 3 + store volatile i32 %1568, i32* @P3_is_marked, align 4, !tbaa !5 + br label %1569 + +1569: ; preds = %1547, %1551, %1543, %1539, %1536, %1533 + %1570 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1571 = icmp sgt i32 %1570, 4 + br i1 %1571, label %1572, label %1605 + +1572: ; preds = %1569 + %1573 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1574 = icmp slt i32 %1573, 4 + br i1 %1574, label %1575, label %1605 + +1575: ; preds = %1572 + %1576 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1577 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1578 = icmp eq i64 %1576, %1577 + br i1 %1578, label %1579, label %1605 + +1579: ; preds = %1575 + %1580 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1581 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %1582 = icmp eq i64 %1580, %1581 + br i1 %1582, 
label %1583, label %1605 + +1583: ; preds = %1579 + %1584 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1585 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1586 = icmp sgt i64 %1585, %1584 + br i1 %1586, label %1587, label %1605 + +1587: ; preds = %1583 + %1588 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + store volatile i64 %1588, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1589 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1590 = add nsw i32 %1589, -4 + store volatile i32 %1590, i32* @P2_is_marked, align 4, !tbaa !5 + %1591 = add nsw i64 %1585, %1584 + %1592 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1593 = sext i32 %1592 to i64 + %1594 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1593 + store volatile i64 %1584, i64* %1594, align 8, !tbaa !9 + %1595 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1596 = add nsw i32 %1595, 1 + %1597 = sext i32 %1596 to i64 + %1598 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1597 + store volatile i64 %1585, i64* %1598, align 8, !tbaa !9 + %1599 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1600 = add nsw i32 %1599, 2 + %1601 = sext i32 %1600 to i64 + %1602 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1601 + store volatile i64 %1591, i64* %1602, align 8, !tbaa !9 + %1603 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1604 = add nsw i32 %1603, 3 + store volatile i32 %1604, i32* @P3_is_marked, align 4, !tbaa !5 + br label %1605 + +1605: ; preds = %1583, %1587, %1579, %1575, %1572, %1569 + %1606 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1607 = icmp sgt i32 %1606, 4 + br i1 %1607, label %1608, label %1641 + +1608: ; preds = %1605 + %1609 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1610 = icmp slt i32 %1609, 4 + br i1 %1610, label %1611, label %1641 + +1611: ; preds = %1608 + %1612 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1613 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %1614 = icmp eq i64 %1612, %1613 + br i1 %1614, label %1615, label %1641 + +1615: ; preds = %1611 + %1616 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1617 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1618 = icmp eq i64 %1616, %1617 + br i1 %1618, label %1619, label %1641 + +1619: ; preds = %1615 + %1620 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1621 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1622 = icmp sgt i64 %1621, %1620 + br i1 %1622, label %1623, label %1641 + +1623: ; preds = %1619 + %1624 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + store volatile i64 
%1624, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1625 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1626 = add nsw i32 %1625, -4 + store volatile i32 %1626, i32* @P2_is_marked, align 4, !tbaa !5 + %1627 = add nsw i64 %1621, %1620 + %1628 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1629 = sext i32 %1628 to i64 + %1630 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1629 + store volatile i64 %1620, i64* %1630, align 8, !tbaa !9 + %1631 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1632 = add nsw i32 %1631, 1 + %1633 = sext i32 %1632 to i64 + %1634 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1633 + store volatile i64 %1621, i64* %1634, align 8, !tbaa !9 + %1635 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1636 = add nsw i32 %1635, 2 + %1637 = sext i32 %1636 to i64 + %1638 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1637 + store volatile i64 %1627, i64* %1638, align 8, !tbaa !9 + %1639 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1640 = add nsw i32 %1639, 3 + store volatile i32 %1640, i32* @P3_is_marked, align 4, !tbaa !5 + br label %1641 + +1641: ; preds = %1619, %1623, %1615, %1611, %1608, %1605 + %1642 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1643 = icmp sgt i32 %1642, 4 + br i1 %1643, label %1644, label %1677 + +1644: ; preds = %1641 + %1645 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1646 = icmp slt i32 %1645, 4 + br i1 %1646, label %1647, label %1677 + +1647: ; preds = %1644 + %1648 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1649 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %1650 = icmp eq i64 %1648, %1649 + br i1 %1650, label %1651, label %1677 + +1651: ; preds = %1647 + %1652 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1653 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1654 = icmp eq i64 %1652, %1653 + br i1 %1654, label %1655, label %1677 + +1655: ; preds = %1651 + %1656 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1657 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1658 = icmp sgt i64 %1657, %1656 + br i1 %1658, label %1659, label %1677 + +1659: ; preds = %1655 + %1660 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + store volatile i64 %1660, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1661 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1662 = add nsw i32 %1661, -4 + store volatile i32 %1662, i32* @P2_is_marked, align 4, !tbaa !5 + %1663 = add nsw i64 %1657, %1656 + %1664 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1665 = sext i32 %1664 to i64 + %1666 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1665 + store volatile i64 %1656, i64* %1666, align 8, !tbaa !9 + %1667 = load 
volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1668 = add nsw i32 %1667, 1 + %1669 = sext i32 %1668 to i64 + %1670 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1669 + store volatile i64 %1657, i64* %1670, align 8, !tbaa !9 + %1671 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1672 = add nsw i32 %1671, 2 + %1673 = sext i32 %1672 to i64 + %1674 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1673 + store volatile i64 %1663, i64* %1674, align 8, !tbaa !9 + %1675 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1676 = add nsw i32 %1675, 3 + store volatile i32 %1676, i32* @P3_is_marked, align 4, !tbaa !5 + br label %1677 + +1677: ; preds = %1655, %1659, %1651, %1647, %1644, %1641 + %1678 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1679 = icmp sgt i32 %1678, 4 + br i1 %1679, label %1680, label %1713 + +1680: ; preds = %1677 + %1681 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1682 = icmp slt i32 %1681, 4 + br i1 %1682, label %1683, label %1713 + +1683: ; preds = %1680 + %1684 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1685 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1686 = icmp eq i64 %1684, %1685 + br i1 %1686, label %1687, label %1713 + +1687: ; preds = %1683 + %1688 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1689 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1690 = icmp eq i64 %1688, %1689 + br i1 %1690, label %1691, label %1713 + +1691: ; preds = %1687 + %1692 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1693 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1694 = icmp sgt i64 %1693, %1692 + br i1 %1694, label %1695, label %1713 + +1695: ; preds = %1691 + %1696 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + store volatile i64 %1696, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1697 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1698 = add nsw i32 %1697, -4 + store volatile i32 %1698, i32* @P2_is_marked, align 4, !tbaa !5 + %1699 = add nsw i64 %1693, %1692 + %1700 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1701 = sext i32 %1700 to i64 + %1702 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1701 + store volatile i64 %1692, i64* %1702, align 8, !tbaa !9 + %1703 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1704 = add nsw i32 %1703, 1 + %1705 = sext i32 %1704 to i64 + %1706 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1705 + store volatile i64 %1693, i64* %1706, align 8, !tbaa !9 + %1707 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1708 = add nsw i32 %1707, 2 + %1709 = sext i32 %1708 to i64 + %1710 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1709 + store volatile i64 %1699, i64* %1710, align 8, !tbaa !9 + %1711 = load volatile i32, i32* 
@P3_is_marked, align 4, !tbaa !5 + %1712 = add nsw i32 %1711, 3 + store volatile i32 %1712, i32* @P3_is_marked, align 4, !tbaa !5 + br label %1713 + +1713: ; preds = %1691, %1695, %1687, %1683, %1680, %1677 + %1714 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1715 = icmp sgt i32 %1714, 4 + br i1 %1715, label %1716, label %1749 + +1716: ; preds = %1713 + %1717 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1718 = icmp slt i32 %1717, 4 + br i1 %1718, label %1719, label %1749 + +1719: ; preds = %1716 + %1720 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1721 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %1722 = icmp eq i64 %1720, %1721 + br i1 %1722, label %1723, label %1749 + +1723: ; preds = %1719 + %1724 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1725 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1726 = icmp eq i64 %1724, %1725 + br i1 %1726, label %1727, label %1749 + +1727: ; preds = %1723 + %1728 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1729 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1730 = icmp sgt i64 %1729, %1728 + br i1 %1730, label %1731, label %1749 + +1731: ; preds = %1727 + %1732 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + store volatile i64 %1732, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1733 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1734 = add nsw i32 %1733, -4 + store volatile i32 %1734, i32* @P2_is_marked, align 4, !tbaa !5 + %1735 = add nsw i64 %1729, %1728 + %1736 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1737 = sext i32 %1736 to i64 + %1738 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1737 + store volatile i64 %1728, i64* %1738, align 8, !tbaa !9 + %1739 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1740 = add nsw i32 %1739, 1 + %1741 = sext i32 %1740 to i64 + %1742 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1741 + store volatile i64 %1729, i64* %1742, align 8, !tbaa !9 + %1743 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1744 = add nsw i32 %1743, 2 + %1745 = sext i32 %1744 to i64 + %1746 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1745 + store volatile i64 %1735, i64* %1746, align 8, !tbaa !9 + %1747 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1748 = add nsw i32 %1747, 3 + store volatile i32 %1748, i32* @P3_is_marked, align 4, !tbaa !5 + br label %1749 + +1749: ; preds = %1727, %1731, %1723, %1719, %1716, %1713 + %1750 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1751 = icmp sgt i32 %1750, 4 + br i1 %1751, label %1752, label %1785 + +1752: ; preds = %1749 + %1753 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1754 = icmp slt i32 %1753, 4 + br i1 %1754, label %1755, label %1785 + +1755: ; preds = %1752 + %1756 = load volatile i64, i64* getelementptr 
inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1757 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1758 = icmp eq i64 %1756, %1757 + br i1 %1758, label %1759, label %1785 + +1759: ; preds = %1755 + %1760 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1761 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1762 = icmp eq i64 %1760, %1761 + br i1 %1762, label %1763, label %1785 + +1763: ; preds = %1759 + %1764 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1765 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1766 = icmp sgt i64 %1765, %1764 + br i1 %1766, label %1767, label %1785 + +1767: ; preds = %1763 + %1768 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + store volatile i64 %1768, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1769 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1770 = add nsw i32 %1769, -4 + store volatile i32 %1770, i32* @P2_is_marked, align 4, !tbaa !5 + %1771 = add nsw i64 %1765, %1764 + %1772 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1773 = sext i32 %1772 to i64 + %1774 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1773 + store volatile i64 %1764, i64* %1774, align 8, !tbaa !9 + %1775 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1776 = add nsw i32 %1775, 1 + %1777 = sext i32 %1776 to i64 + %1778 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1777 + store volatile i64 %1765, i64* %1778, align 8, !tbaa !9 + %1779 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1780 = add nsw i32 %1779, 2 + %1781 = sext i32 %1780 to i64 + %1782 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1781 + store volatile i64 %1771, i64* %1782, align 8, !tbaa !9 + %1783 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1784 = add nsw i32 %1783, 3 + store volatile i32 %1784, i32* @P3_is_marked, align 4, !tbaa !5 + br label %1785 + +1785: ; preds = %1763, %1767, %1759, %1755, %1752, %1749 + %1786 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1787 = icmp sgt i32 %1786, 4 + br i1 %1787, label %1788, label %1821 + +1788: ; preds = %1785 + %1789 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1790 = icmp slt i32 %1789, 4 + br i1 %1790, label %1791, label %1821 + +1791: ; preds = %1788 + %1792 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1793 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1794 = icmp eq i64 %1792, %1793 + br i1 %1794, label %1795, label %1821 + +1795: ; preds = %1791 + %1796 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1797 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 
8, !tbaa !9 + %1798 = icmp eq i64 %1796, %1797 + br i1 %1798, label %1799, label %1821 + +1799: ; preds = %1795 + %1800 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1801 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1802 = icmp sgt i64 %1801, %1800 + br i1 %1802, label %1803, label %1821 + +1803: ; preds = %1799 + %1804 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + store volatile i64 %1804, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1805 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1806 = add nsw i32 %1805, -4 + store volatile i32 %1806, i32* @P2_is_marked, align 4, !tbaa !5 + %1807 = add nsw i64 %1801, %1800 + %1808 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1809 = sext i32 %1808 to i64 + %1810 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1809 + store volatile i64 %1800, i64* %1810, align 8, !tbaa !9 + %1811 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1812 = add nsw i32 %1811, 1 + %1813 = sext i32 %1812 to i64 + %1814 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1813 + store volatile i64 %1801, i64* %1814, align 8, !tbaa !9 + %1815 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1816 = add nsw i32 %1815, 2 + %1817 = sext i32 %1816 to i64 + %1818 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1817 + store volatile i64 %1807, i64* %1818, align 8, !tbaa !9 + %1819 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1820 = add nsw i32 %1819, 3 + store volatile i32 %1820, i32* @P3_is_marked, align 4, !tbaa !5 + br label %1821 + +1821: ; preds = %1799, %1803, %1795, %1791, %1788, %1785 + %1822 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1823 = icmp sgt i32 %1822, 4 + br i1 %1823, label %1824, label %1857 + +1824: ; preds = %1821 + %1825 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1826 = icmp slt i32 %1825, 4 + br i1 %1826, label %1827, label %1857 + +1827: ; preds = %1824 + %1828 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1829 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1830 = icmp eq i64 %1828, %1829 + br i1 %1830, label %1831, label %1857 + +1831: ; preds = %1827 + %1832 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1833 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1834 = icmp eq i64 %1832, %1833 + br i1 %1834, label %1835, label %1857 + +1835: ; preds = %1831 + %1836 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1837 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1838 = icmp sgt i64 %1837, %1836 + br i1 %1838, label %1839, label %1857 + +1839: ; preds = %1835 + %1840 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* 
@P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + store volatile i64 %1840, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1841 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1842 = add nsw i32 %1841, -4 + store volatile i32 %1842, i32* @P2_is_marked, align 4, !tbaa !5 + %1843 = add nsw i64 %1837, %1836 + %1844 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1845 = sext i32 %1844 to i64 + %1846 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1845 + store volatile i64 %1836, i64* %1846, align 8, !tbaa !9 + %1847 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1848 = add nsw i32 %1847, 1 + %1849 = sext i32 %1848 to i64 + %1850 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1849 + store volatile i64 %1837, i64* %1850, align 8, !tbaa !9 + %1851 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1852 = add nsw i32 %1851, 2 + %1853 = sext i32 %1852 to i64 + %1854 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1853 + store volatile i64 %1843, i64* %1854, align 8, !tbaa !9 + %1855 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1856 = add nsw i32 %1855, 3 + store volatile i32 %1856, i32* @P3_is_marked, align 4, !tbaa !5 + br label %1857 + +1857: ; preds = %1835, %1839, %1831, %1827, %1824, %1821 + %1858 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1859 = icmp sgt i32 %1858, 4 + br i1 %1859, label %1860, label %1892 + +1860: ; preds = %1857 + %1861 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1862 = icmp slt i32 %1861, 4 + br i1 %1862, label %1863, label %1892 + +1863: ; preds = %1860 + %1864 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1865 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %1866 = icmp eq i64 %1864, %1865 + br i1 %1866, label %1867, label %1892 + +1867: ; preds = %1863 + %1868 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1869 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1870 = icmp eq i64 %1868, %1869 + br i1 %1870, label %1871, label %1892 + +1871: ; preds = %1867 + %1872 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1873 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1874 = icmp sgt i64 %1873, %1872 + br i1 %1874, label %1875, label %1892 + +1875: ; preds = %1871 + %1876 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1877 = add nsw i32 %1876, -4 + store volatile i32 %1877, i32* @P2_is_marked, align 4, !tbaa !5 + %1878 = add nsw i64 %1873, %1872 + %1879 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1880 = sext i32 %1879 to i64 + %1881 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1880 + store volatile i64 %1872, i64* %1881, align 8, !tbaa !9 + %1882 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1883 = add nsw i32 %1882, 1 + %1884 = sext i32 %1883 to i64 + %1885 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, 
i64 %1884 + store volatile i64 %1873, i64* %1885, align 8, !tbaa !9 + %1886 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1887 = add nsw i32 %1886, 2 + %1888 = sext i32 %1887 to i64 + %1889 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1888 + store volatile i64 %1878, i64* %1889, align 8, !tbaa !9 + %1890 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1891 = add nsw i32 %1890, 3 + store volatile i32 %1891, i32* @P3_is_marked, align 4, !tbaa !5 + br label %1892 + +1892: ; preds = %1871, %1875, %1867, %1863, %1860, %1857 + %1893 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1894 = icmp sgt i32 %1893, 4 + br i1 %1894, label %1895, label %1928 + +1895: ; preds = %1892 + %1896 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1897 = icmp slt i32 %1896, 4 + br i1 %1897, label %1898, label %1928 + +1898: ; preds = %1895 + %1899 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1900 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1901 = icmp eq i64 %1899, %1900 + br i1 %1901, label %1902, label %1928 + +1902: ; preds = %1898 + %1903 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1904 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1905 = icmp eq i64 %1903, %1904 + br i1 %1905, label %1906, label %1928 + +1906: ; preds = %1902 + %1907 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1908 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1909 = icmp sgt i64 %1908, %1907 + br i1 %1909, label %1910, label %1928 + +1910: ; preds = %1906 + %1911 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + store volatile i64 %1911, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1912 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1913 = add nsw i32 %1912, -4 + store volatile i32 %1913, i32* @P2_is_marked, align 4, !tbaa !5 + %1914 = add nsw i64 %1908, %1907 + %1915 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1916 = sext i32 %1915 to i64 + %1917 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1916 + store volatile i64 %1907, i64* %1917, align 8, !tbaa !9 + %1918 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1919 = add nsw i32 %1918, 1 + %1920 = sext i32 %1919 to i64 + %1921 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1920 + store volatile i64 %1908, i64* %1921, align 8, !tbaa !9 + %1922 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1923 = add nsw i32 %1922, 2 + %1924 = sext i32 %1923 to i64 + %1925 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1924 + store volatile i64 %1914, i64* %1925, align 8, !tbaa !9 + %1926 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1927 = add nsw i32 %1926, 3 + store volatile i32 %1927, i32* @P3_is_marked, align 4, !tbaa !5 + br label %1928 + +1928: ; preds = %1906, %1910, %1902, %1898, %1895, 
%1892 + %1929 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1930 = icmp sgt i32 %1929, 4 + br i1 %1930, label %1931, label %1963 + +1931: ; preds = %1928 + %1932 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1933 = icmp slt i32 %1932, 4 + br i1 %1933, label %1934, label %1963 + +1934: ; preds = %1931 + %1935 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1936 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1937 = icmp eq i64 %1935, %1936 + br i1 %1937, label %1938, label %1963 + +1938: ; preds = %1934 + %1939 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1940 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %1941 = icmp eq i64 %1939, %1940 + br i1 %1941, label %1942, label %1963 + +1942: ; preds = %1938 + %1943 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1944 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %1945 = icmp sgt i64 %1944, %1943 + br i1 %1945, label %1946, label %1963 + +1946: ; preds = %1942 + %1947 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1948 = add nsw i32 %1947, -4 + store volatile i32 %1948, i32* @P2_is_marked, align 4, !tbaa !5 + %1949 = add nsw i64 %1944, %1943 + %1950 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1951 = sext i32 %1950 to i64 + %1952 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1951 + store volatile i64 %1943, i64* %1952, align 8, !tbaa !9 + %1953 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1954 = add nsw i32 %1953, 1 + %1955 = sext i32 %1954 to i64 + %1956 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1955 + store volatile i64 %1944, i64* %1956, align 8, !tbaa !9 + %1957 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1958 = add nsw i32 %1957, 2 + %1959 = sext i32 %1958 to i64 + %1960 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1959 + store volatile i64 %1949, i64* %1960, align 8, !tbaa !9 + %1961 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1962 = add nsw i32 %1961, 3 + store volatile i32 %1962, i32* @P3_is_marked, align 4, !tbaa !5 + br label %1963 + +1963: ; preds = %1942, %1946, %1938, %1934, %1931, %1928 + %1964 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1965 = icmp sgt i32 %1964, 4 + br i1 %1965, label %1966, label %1999 + +1966: ; preds = %1963 + %1967 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1968 = icmp slt i32 %1967, 4 + br i1 %1968, label %1969, label %1999 + +1969: ; preds = %1966 + %1970 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %1971 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1972 = icmp eq i64 %1970, %1971 + br i1 %1972, label %1973, label %1999 + +1973: ; preds = %1969 + %1974 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %1975 = load 
volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %1976 = icmp eq i64 %1974, %1975 + br i1 %1976, label %1977, label %1999 + +1977: ; preds = %1973 + %1978 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %1979 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %1980 = icmp sgt i64 %1979, %1978 + br i1 %1980, label %1981, label %1999 + +1981: ; preds = %1977 + %1982 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + store volatile i64 %1982, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %1983 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %1984 = add nsw i32 %1983, -4 + store volatile i32 %1984, i32* @P2_is_marked, align 4, !tbaa !5 + %1985 = add nsw i64 %1979, %1978 + %1986 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1987 = sext i32 %1986 to i64 + %1988 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1987 + store volatile i64 %1978, i64* %1988, align 8, !tbaa !9 + %1989 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1990 = add nsw i32 %1989, 1 + %1991 = sext i32 %1990 to i64 + %1992 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1991 + store volatile i64 %1979, i64* %1992, align 8, !tbaa !9 + %1993 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1994 = add nsw i32 %1993, 2 + %1995 = sext i32 %1994 to i64 + %1996 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %1995 + store volatile i64 %1985, i64* %1996, align 8, !tbaa !9 + %1997 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %1998 = add nsw i32 %1997, 3 + store volatile i32 %1998, i32* @P3_is_marked, align 4, !tbaa !5 + br label %1999 + +1999: ; preds = %1977, %1981, %1973, %1969, %1966, %1963 + %2000 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2001 = icmp sgt i32 %2000, 4 + br i1 %2001, label %2002, label %2034 + +2002: ; preds = %1999 + %2003 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2004 = icmp slt i32 %2003, 4 + br i1 %2004, label %2005, label %2034 + +2005: ; preds = %2002 + %2006 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2007 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %2008 = icmp eq i64 %2006, %2007 + br i1 %2008, label %2009, label %2034 + +2009: ; preds = %2005 + %2010 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2011 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2012 = icmp eq i64 %2010, %2011 + br i1 %2012, label %2013, label %2034 + +2013: ; preds = %2009 + %2014 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2015 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2016 = icmp sgt i64 %2015, %2014 + br i1 %2016, label %2017, label %2034 + +2017: ; preds = 
%2013 + %2018 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2019 = add nsw i32 %2018, -4 + store volatile i32 %2019, i32* @P2_is_marked, align 4, !tbaa !5 + %2020 = add nsw i64 %2015, %2014 + %2021 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2022 = sext i32 %2021 to i64 + %2023 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2022 + store volatile i64 %2014, i64* %2023, align 8, !tbaa !9 + %2024 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2025 = add nsw i32 %2024, 1 + %2026 = sext i32 %2025 to i64 + %2027 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2026 + store volatile i64 %2015, i64* %2027, align 8, !tbaa !9 + %2028 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2029 = add nsw i32 %2028, 2 + %2030 = sext i32 %2029 to i64 + %2031 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2030 + store volatile i64 %2020, i64* %2031, align 8, !tbaa !9 + %2032 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2033 = add nsw i32 %2032, 3 + store volatile i32 %2033, i32* @P3_is_marked, align 4, !tbaa !5 + br label %2034 + +2034: ; preds = %2013, %2017, %2009, %2005, %2002, %1999 + %2035 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2036 = icmp sgt i32 %2035, 4 + br i1 %2036, label %2037, label %2070 + +2037: ; preds = %2034 + %2038 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2039 = icmp slt i32 %2038, 4 + br i1 %2039, label %2040, label %2070 + +2040: ; preds = %2037 + %2041 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2042 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2043 = icmp eq i64 %2041, %2042 + br i1 %2043, label %2044, label %2070 + +2044: ; preds = %2040 + %2045 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2046 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2047 = icmp eq i64 %2045, %2046 + br i1 %2047, label %2048, label %2070 + +2048: ; preds = %2044 + %2049 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2050 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2051 = icmp sgt i64 %2050, %2049 + br i1 %2051, label %2052, label %2070 + +2052: ; preds = %2048 + %2053 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + store volatile i64 %2053, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2054 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2055 = add nsw i32 %2054, -4 + store volatile i32 %2055, i32* @P2_is_marked, align 4, !tbaa !5 + %2056 = add nsw i64 %2050, %2049 + %2057 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2058 = sext i32 %2057 to i64 + %2059 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2058 + store volatile i64 %2049, i64* %2059, align 8, !tbaa !9 + %2060 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2061 = add nsw i32 %2060, 1 + %2062 = sext i32 %2061 to i64 
+ %2063 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2062 + store volatile i64 %2050, i64* %2063, align 8, !tbaa !9 + %2064 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2065 = add nsw i32 %2064, 2 + %2066 = sext i32 %2065 to i64 + %2067 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2066 + store volatile i64 %2056, i64* %2067, align 8, !tbaa !9 + %2068 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2069 = add nsw i32 %2068, 3 + store volatile i32 %2069, i32* @P3_is_marked, align 4, !tbaa !5 + br label %2070 + +2070: ; preds = %2048, %2052, %2044, %2040, %2037, %2034 + %2071 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2072 = icmp sgt i32 %2071, 4 + br i1 %2072, label %2073, label %2105 + +2073: ; preds = %2070 + %2074 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2075 = icmp slt i32 %2074, 4 + br i1 %2075, label %2076, label %2105 + +2076: ; preds = %2073 + %2077 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2078 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2079 = icmp eq i64 %2077, %2078 + br i1 %2079, label %2080, label %2105 + +2080: ; preds = %2076 + %2081 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2082 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %2083 = icmp eq i64 %2081, %2082 + br i1 %2083, label %2084, label %2105 + +2084: ; preds = %2080 + %2085 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2086 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2087 = icmp sgt i64 %2086, %2085 + br i1 %2087, label %2088, label %2105 + +2088: ; preds = %2084 + %2089 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2090 = add nsw i32 %2089, -4 + store volatile i32 %2090, i32* @P2_is_marked, align 4, !tbaa !5 + %2091 = add nsw i64 %2086, %2085 + %2092 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2093 = sext i32 %2092 to i64 + %2094 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2093 + store volatile i64 %2085, i64* %2094, align 8, !tbaa !9 + %2095 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2096 = add nsw i32 %2095, 1 + %2097 = sext i32 %2096 to i64 + %2098 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2097 + store volatile i64 %2086, i64* %2098, align 8, !tbaa !9 + %2099 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2100 = add nsw i32 %2099, 2 + %2101 = sext i32 %2100 to i64 + %2102 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2101 + store volatile i64 %2091, i64* %2102, align 8, !tbaa !9 + %2103 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2104 = add nsw i32 %2103, 3 + store volatile i32 %2104, i32* @P3_is_marked, align 4, !tbaa !5 + br label %2105 + +2105: ; preds = %2084, %2088, %2080, %2076, %2073, %2070 + %2106 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2107 = icmp sgt i32 %2106, 4 + br i1 %2107, label %2108, label %2141 + +2108: ; preds = %2105 + %2109 = load 
volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2110 = icmp slt i32 %2109, 4 + br i1 %2110, label %2111, label %2141 + +2111: ; preds = %2108 + %2112 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2113 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2114 = icmp eq i64 %2112, %2113 + br i1 %2114, label %2115, label %2141 + +2115: ; preds = %2111 + %2116 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2117 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %2118 = icmp eq i64 %2116, %2117 + br i1 %2118, label %2119, label %2141 + +2119: ; preds = %2115 + %2120 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2121 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2122 = icmp sgt i64 %2121, %2120 + br i1 %2122, label %2123, label %2141 + +2123: ; preds = %2119 + %2124 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + store volatile i64 %2124, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2125 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2126 = add nsw i32 %2125, -4 + store volatile i32 %2126, i32* @P2_is_marked, align 4, !tbaa !5 + %2127 = add nsw i64 %2121, %2120 + %2128 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2129 = sext i32 %2128 to i64 + %2130 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2129 + store volatile i64 %2120, i64* %2130, align 8, !tbaa !9 + %2131 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2132 = add nsw i32 %2131, 1 + %2133 = sext i32 %2132 to i64 + %2134 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2133 + store volatile i64 %2121, i64* %2134, align 8, !tbaa !9 + %2135 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2136 = add nsw i32 %2135, 2 + %2137 = sext i32 %2136 to i64 + %2138 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2137 + store volatile i64 %2127, i64* %2138, align 8, !tbaa !9 + %2139 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2140 = add nsw i32 %2139, 3 + store volatile i32 %2140, i32* @P3_is_marked, align 4, !tbaa !5 + br label %2141 + +2141: ; preds = %2119, %2123, %2115, %2111, %2108, %2105 + %2142 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2143 = icmp sgt i32 %2142, 4 + br i1 %2143, label %2144, label %2177 + +2144: ; preds = %2141 + %2145 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2146 = icmp slt i32 %2145, 4 + br i1 %2146, label %2147, label %2177 + +2147: ; preds = %2144 + %2148 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2149 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2150 = icmp eq i64 %2148, %2149 + br i1 %2150, label %2151, label %2177 + +2151: ; preds = %2147 + %2152 = load volatile i64, i64* getelementptr inbounds ([5 x 
i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2153 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2154 = icmp eq i64 %2152, %2153 + br i1 %2154, label %2155, label %2177 + +2155: ; preds = %2151 + %2156 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2157 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2158 = icmp sgt i64 %2157, %2156 + br i1 %2158, label %2159, label %2177 + +2159: ; preds = %2155 + %2160 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + store volatile i64 %2160, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2161 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2162 = add nsw i32 %2161, -4 + store volatile i32 %2162, i32* @P2_is_marked, align 4, !tbaa !5 + %2163 = add nsw i64 %2157, %2156 + %2164 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2165 = sext i32 %2164 to i64 + %2166 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2165 + store volatile i64 %2156, i64* %2166, align 8, !tbaa !9 + %2167 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2168 = add nsw i32 %2167, 1 + %2169 = sext i32 %2168 to i64 + %2170 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2169 + store volatile i64 %2157, i64* %2170, align 8, !tbaa !9 + %2171 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2172 = add nsw i32 %2171, 2 + %2173 = sext i32 %2172 to i64 + %2174 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2173 + store volatile i64 %2163, i64* %2174, align 8, !tbaa !9 + %2175 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2176 = add nsw i32 %2175, 3 + store volatile i32 %2176, i32* @P3_is_marked, align 4, !tbaa !5 + br label %2177 + +2177: ; preds = %2155, %2159, %2151, %2147, %2144, %2141 + %2178 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2179 = icmp sgt i32 %2178, 4 + br i1 %2179, label %2180, label %2213 + +2180: ; preds = %2177 + %2181 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2182 = icmp slt i32 %2181, 4 + br i1 %2182, label %2183, label %2213 + +2183: ; preds = %2180 + %2184 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2185 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %2186 = icmp eq i64 %2184, %2185 + br i1 %2186, label %2187, label %2213 + +2187: ; preds = %2183 + %2188 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2189 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2190 = icmp eq i64 %2188, %2189 + br i1 %2190, label %2191, label %2213 + +2191: ; preds = %2187 + %2192 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2193 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + 
%2194 = icmp sgt i64 %2193, %2192 + br i1 %2194, label %2195, label %2213 + +2195: ; preds = %2191 + %2196 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + store volatile i64 %2196, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2197 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2198 = add nsw i32 %2197, -4 + store volatile i32 %2198, i32* @P2_is_marked, align 4, !tbaa !5 + %2199 = add nsw i64 %2193, %2192 + %2200 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2201 = sext i32 %2200 to i64 + %2202 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2201 + store volatile i64 %2192, i64* %2202, align 8, !tbaa !9 + %2203 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2204 = add nsw i32 %2203, 1 + %2205 = sext i32 %2204 to i64 + %2206 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2205 + store volatile i64 %2193, i64* %2206, align 8, !tbaa !9 + %2207 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2208 = add nsw i32 %2207, 2 + %2209 = sext i32 %2208 to i64 + %2210 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2209 + store volatile i64 %2199, i64* %2210, align 8, !tbaa !9 + %2211 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2212 = add nsw i32 %2211, 3 + store volatile i32 %2212, i32* @P3_is_marked, align 4, !tbaa !5 + br label %2213 + +2213: ; preds = %2191, %2195, %2187, %2183, %2180, %2177 + %2214 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2215 = icmp sgt i32 %2214, 4 + br i1 %2215, label %2216, label %2248 + +2216: ; preds = %2213 + %2217 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2218 = icmp slt i32 %2217, 4 + br i1 %2218, label %2219, label %2248 + +2219: ; preds = %2216 + %2220 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2221 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %2222 = icmp eq i64 %2220, %2221 + br i1 %2222, label %2223, label %2248 + +2223: ; preds = %2219 + %2224 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2225 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2226 = icmp eq i64 %2224, %2225 + br i1 %2226, label %2227, label %2248 + +2227: ; preds = %2223 + %2228 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2229 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2230 = icmp sgt i64 %2229, %2228 + br i1 %2230, label %2231, label %2248 + +2231: ; preds = %2227 + %2232 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2233 = add nsw i32 %2232, -4 + store volatile i32 %2233, i32* @P2_is_marked, align 4, !tbaa !5 + %2234 = add nsw i64 %2229, %2228 + %2235 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2236 = sext i32 %2235 to i64 + %2237 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2236 + store volatile i64 %2228, i64* %2237, align 8, !tbaa !9 + %2238 = load volatile i32, i32* 
@P3_is_marked, align 4, !tbaa !5 + %2239 = add nsw i32 %2238, 1 + %2240 = sext i32 %2239 to i64 + %2241 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2240 + store volatile i64 %2229, i64* %2241, align 8, !tbaa !9 + %2242 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2243 = add nsw i32 %2242, 2 + %2244 = sext i32 %2243 to i64 + %2245 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2244 + store volatile i64 %2234, i64* %2245, align 8, !tbaa !9 + %2246 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2247 = add nsw i32 %2246, 3 + store volatile i32 %2247, i32* @P3_is_marked, align 4, !tbaa !5 + br label %2248 + +2248: ; preds = %2227, %2231, %2223, %2219, %2216, %2213 + %2249 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2250 = icmp sgt i32 %2249, 4 + br i1 %2250, label %2251, label %2284 + +2251: ; preds = %2248 + %2252 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2253 = icmp slt i32 %2252, 4 + br i1 %2253, label %2254, label %2284 + +2254: ; preds = %2251 + %2255 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2256 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2257 = icmp eq i64 %2255, %2256 + br i1 %2257, label %2258, label %2284 + +2258: ; preds = %2254 + %2259 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2260 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2261 = icmp eq i64 %2259, %2260 + br i1 %2261, label %2262, label %2284 + +2262: ; preds = %2258 + %2263 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2264 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2265 = icmp sgt i64 %2264, %2263 + br i1 %2265, label %2266, label %2284 + +2266: ; preds = %2262 + %2267 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + store volatile i64 %2267, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2268 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2269 = add nsw i32 %2268, -4 + store volatile i32 %2269, i32* @P2_is_marked, align 4, !tbaa !5 + %2270 = add nsw i64 %2264, %2263 + %2271 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2272 = sext i32 %2271 to i64 + %2273 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2272 + store volatile i64 %2263, i64* %2273, align 8, !tbaa !9 + %2274 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2275 = add nsw i32 %2274, 1 + %2276 = sext i32 %2275 to i64 + %2277 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2276 + store volatile i64 %2264, i64* %2277, align 8, !tbaa !9 + %2278 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2279 = add nsw i32 %2278, 2 + %2280 = sext i32 %2279 to i64 + %2281 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2280 + store volatile i64 %2270, i64* %2281, align 8, !tbaa !9 + %2282 = load volatile i32, i32* @P3_is_marked, align 
4, !tbaa !5 + %2283 = add nsw i32 %2282, 3 + store volatile i32 %2283, i32* @P3_is_marked, align 4, !tbaa !5 + br label %2284 + +2284: ; preds = %2262, %2266, %2258, %2254, %2251, %2248 + %2285 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2286 = icmp sgt i32 %2285, 4 + br i1 %2286, label %2287, label %2319 + +2287: ; preds = %2284 + %2288 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2289 = icmp slt i32 %2288, 4 + br i1 %2289, label %2290, label %2319 + +2290: ; preds = %2287 + %2291 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2292 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2293 = icmp eq i64 %2291, %2292 + br i1 %2293, label %2294, label %2319 + +2294: ; preds = %2290 + %2295 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2296 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %2297 = icmp eq i64 %2295, %2296 + br i1 %2297, label %2298, label %2319 + +2298: ; preds = %2294 + %2299 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2300 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2301 = icmp sgt i64 %2300, %2299 + br i1 %2301, label %2302, label %2319 + +2302: ; preds = %2298 + %2303 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2304 = add nsw i32 %2303, -4 + store volatile i32 %2304, i32* @P2_is_marked, align 4, !tbaa !5 + %2305 = add nsw i64 %2300, %2299 + %2306 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2307 = sext i32 %2306 to i64 + %2308 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2307 + store volatile i64 %2299, i64* %2308, align 8, !tbaa !9 + %2309 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2310 = add nsw i32 %2309, 1 + %2311 = sext i32 %2310 to i64 + %2312 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2311 + store volatile i64 %2300, i64* %2312, align 8, !tbaa !9 + %2313 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2314 = add nsw i32 %2313, 2 + %2315 = sext i32 %2314 to i64 + %2316 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2315 + store volatile i64 %2305, i64* %2316, align 8, !tbaa !9 + %2317 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2318 = add nsw i32 %2317, 3 + store volatile i32 %2318, i32* @P3_is_marked, align 4, !tbaa !5 + br label %2319 + +2319: ; preds = %2298, %2302, %2294, %2290, %2287, %2284 + %2320 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2321 = icmp sgt i32 %2320, 4 + br i1 %2321, label %2322, label %2355 + +2322: ; preds = %2319 + %2323 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2324 = icmp slt i32 %2323, 4 + br i1 %2324, label %2325, label %2355 + +2325: ; preds = %2322 + %2326 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2327 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2328 = icmp eq i64 %2326, %2327 + br i1 %2328, label %2329, label 
%2355 + +2329: ; preds = %2325 + %2330 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2331 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2332 = icmp eq i64 %2330, %2331 + br i1 %2332, label %2333, label %2355 + +2333: ; preds = %2329 + %2334 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %2335 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2336 = icmp sgt i64 %2335, %2334 + br i1 %2336, label %2337, label %2355 + +2337: ; preds = %2333 + %2338 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + store volatile i64 %2338, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2339 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2340 = add nsw i32 %2339, -4 + store volatile i32 %2340, i32* @P2_is_marked, align 4, !tbaa !5 + %2341 = add nsw i64 %2335, %2334 + %2342 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2343 = sext i32 %2342 to i64 + %2344 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2343 + store volatile i64 %2334, i64* %2344, align 8, !tbaa !9 + %2345 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2346 = add nsw i32 %2345, 1 + %2347 = sext i32 %2346 to i64 + %2348 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2347 + store volatile i64 %2335, i64* %2348, align 8, !tbaa !9 + %2349 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2350 = add nsw i32 %2349, 2 + %2351 = sext i32 %2350 to i64 + %2352 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2351 + store volatile i64 %2341, i64* %2352, align 8, !tbaa !9 + %2353 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2354 = add nsw i32 %2353, 3 + store volatile i32 %2354, i32* @P3_is_marked, align 4, !tbaa !5 + br label %2355 + +2355: ; preds = %2333, %2337, %2329, %2325, %2322, %2319 + %2356 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2357 = icmp sgt i32 %2356, 4 + br i1 %2357, label %2358, label %2391 + +2358: ; preds = %2355 + %2359 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2360 = icmp slt i32 %2359, 4 + br i1 %2360, label %2361, label %2391 + +2361: ; preds = %2358 + %2362 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2363 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2364 = icmp eq i64 %2362, %2363 + br i1 %2364, label %2365, label %2391 + +2365: ; preds = %2361 + %2366 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2367 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2368 = icmp eq i64 %2366, %2367 + br i1 %2368, label %2369, label %2391 + +2369: ; preds = %2365 + %2370 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %2371 = load volatile i64, i64* 
getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2372 = icmp sgt i64 %2371, %2370 + br i1 %2372, label %2373, label %2391 + +2373: ; preds = %2369 + %2374 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + store volatile i64 %2374, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2375 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2376 = add nsw i32 %2375, -4 + store volatile i32 %2376, i32* @P2_is_marked, align 4, !tbaa !5 + %2377 = add nsw i64 %2371, %2370 + %2378 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2379 = sext i32 %2378 to i64 + %2380 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2379 + store volatile i64 %2370, i64* %2380, align 8, !tbaa !9 + %2381 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2382 = add nsw i32 %2381, 1 + %2383 = sext i32 %2382 to i64 + %2384 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2383 + store volatile i64 %2371, i64* %2384, align 8, !tbaa !9 + %2385 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2386 = add nsw i32 %2385, 2 + %2387 = sext i32 %2386 to i64 + %2388 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2387 + store volatile i64 %2377, i64* %2388, align 8, !tbaa !9 + %2389 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2390 = add nsw i32 %2389, 3 + store volatile i32 %2390, i32* @P3_is_marked, align 4, !tbaa !5 + br label %2391 + +2391: ; preds = %2369, %2373, %2365, %2361, %2358, %2355 + %2392 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2393 = icmp sgt i32 %2392, 4 + br i1 %2393, label %2394, label %2427 + +2394: ; preds = %2391 + %2395 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2396 = icmp slt i32 %2395, 4 + br i1 %2396, label %2397, label %2427 + +2397: ; preds = %2394 + %2398 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2399 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2400 = icmp eq i64 %2398, %2399 + br i1 %2400, label %2401, label %2427 + +2401: ; preds = %2397 + %2402 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2403 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2404 = icmp eq i64 %2402, %2403 + br i1 %2404, label %2405, label %2427 + +2405: ; preds = %2401 + %2406 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %2407 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2408 = icmp sgt i64 %2407, %2406 + br i1 %2408, label %2409, label %2427 + +2409: ; preds = %2405 + %2410 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + store volatile i64 %2410, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2411 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2412 = add nsw i32 %2411, -4 + store 
volatile i32 %2412, i32* @P2_is_marked, align 4, !tbaa !5 + %2413 = add nsw i64 %2407, %2406 + %2414 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2415 = sext i32 %2414 to i64 + %2416 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2415 + store volatile i64 %2406, i64* %2416, align 8, !tbaa !9 + %2417 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2418 = add nsw i32 %2417, 1 + %2419 = sext i32 %2418 to i64 + %2420 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2419 + store volatile i64 %2407, i64* %2420, align 8, !tbaa !9 + %2421 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2422 = add nsw i32 %2421, 2 + %2423 = sext i32 %2422 to i64 + %2424 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2423 + store volatile i64 %2413, i64* %2424, align 8, !tbaa !9 + %2425 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2426 = add nsw i32 %2425, 3 + store volatile i32 %2426, i32* @P3_is_marked, align 4, !tbaa !5 + br label %2427 + +2427: ; preds = %2405, %2409, %2401, %2397, %2394, %2391 + %2428 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2429 = icmp sgt i32 %2428, 4 + br i1 %2429, label %2430, label %2463 + +2430: ; preds = %2427 + %2431 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2432 = icmp slt i32 %2431, 4 + br i1 %2432, label %2433, label %2463 + +2433: ; preds = %2430 + %2434 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2435 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2436 = icmp eq i64 %2434, %2435 + br i1 %2436, label %2437, label %2463 + +2437: ; preds = %2433 + %2438 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2439 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2440 = icmp eq i64 %2438, %2439 + br i1 %2440, label %2441, label %2463 + +2441: ; preds = %2437 + %2442 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %2443 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2444 = icmp sgt i64 %2443, %2442 + br i1 %2444, label %2445, label %2463 + +2445: ; preds = %2441 + %2446 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + store volatile i64 %2446, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2447 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2448 = add nsw i32 %2447, -4 + store volatile i32 %2448, i32* @P2_is_marked, align 4, !tbaa !5 + %2449 = add nsw i64 %2443, %2442 + %2450 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2451 = sext i32 %2450 to i64 + %2452 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2451 + store volatile i64 %2442, i64* %2452, align 8, !tbaa !9 + %2453 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2454 = add nsw i32 %2453, 1 + %2455 = sext i32 %2454 to i64 + %2456 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2455 + store volatile 
i64 %2443, i64* %2456, align 8, !tbaa !9 + %2457 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2458 = add nsw i32 %2457, 2 + %2459 = sext i32 %2458 to i64 + %2460 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2459 + store volatile i64 %2449, i64* %2460, align 8, !tbaa !9 + %2461 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2462 = add nsw i32 %2461, 3 + store volatile i32 %2462, i32* @P3_is_marked, align 4, !tbaa !5 + br label %2463 + +2463: ; preds = %2441, %2445, %2437, %2433, %2430, %2427 + %2464 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2465 = icmp sgt i32 %2464, 4 + br i1 %2465, label %2466, label %2499 + +2466: ; preds = %2463 + %2467 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2468 = icmp slt i32 %2467, 4 + br i1 %2468, label %2469, label %2499 + +2469: ; preds = %2466 + %2470 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2471 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2472 = icmp eq i64 %2470, %2471 + br i1 %2472, label %2473, label %2499 + +2473: ; preds = %2469 + %2474 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2475 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2476 = icmp eq i64 %2474, %2475 + br i1 %2476, label %2477, label %2499 + +2477: ; preds = %2473 + %2478 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %2479 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2480 = icmp sgt i64 %2479, %2478 + br i1 %2480, label %2481, label %2499 + +2481: ; preds = %2477 + %2482 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + store volatile i64 %2482, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2483 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2484 = add nsw i32 %2483, -4 + store volatile i32 %2484, i32* @P2_is_marked, align 4, !tbaa !5 + %2485 = add nsw i64 %2479, %2478 + %2486 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2487 = sext i32 %2486 to i64 + %2488 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2487 + store volatile i64 %2478, i64* %2488, align 8, !tbaa !9 + %2489 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2490 = add nsw i32 %2489, 1 + %2491 = sext i32 %2490 to i64 + %2492 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2491 + store volatile i64 %2479, i64* %2492, align 8, !tbaa !9 + %2493 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2494 = add nsw i32 %2493, 2 + %2495 = sext i32 %2494 to i64 + %2496 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2495 + store volatile i64 %2485, i64* %2496, align 8, !tbaa !9 + %2497 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2498 = add nsw i32 %2497, 3 + store volatile i32 %2498, i32* @P3_is_marked, align 4, !tbaa !5 + br label %2499 + +2499: ; preds = %2477, %2481, %2473, %2469, %2466, %2463 + %2500 = load 
volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2501 = icmp sgt i32 %2500, 4 + br i1 %2501, label %2502, label %2534 + +2502: ; preds = %2499 + %2503 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2504 = icmp slt i32 %2503, 4 + br i1 %2504, label %2505, label %2534 + +2505: ; preds = %2502 + %2506 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2507 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2508 = icmp eq i64 %2506, %2507 + br i1 %2508, label %2509, label %2534 + +2509: ; preds = %2505 + %2510 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2511 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2512 = icmp eq i64 %2510, %2511 + br i1 %2512, label %2513, label %2534 + +2513: ; preds = %2509 + %2514 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %2515 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2516 = icmp sgt i64 %2515, %2514 + br i1 %2516, label %2517, label %2534 + +2517: ; preds = %2513 + %2518 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2519 = add nsw i32 %2518, -4 + store volatile i32 %2519, i32* @P2_is_marked, align 4, !tbaa !5 + %2520 = add nsw i64 %2515, %2514 + %2521 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2522 = sext i32 %2521 to i64 + %2523 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2522 + store volatile i64 %2514, i64* %2523, align 8, !tbaa !9 + %2524 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2525 = add nsw i32 %2524, 1 + %2526 = sext i32 %2525 to i64 + %2527 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2526 + store volatile i64 %2515, i64* %2527, align 8, !tbaa !9 + %2528 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2529 = add nsw i32 %2528, 2 + %2530 = sext i32 %2529 to i64 + %2531 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2530 + store volatile i64 %2520, i64* %2531, align 8, !tbaa !9 + %2532 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2533 = add nsw i32 %2532, 3 + store volatile i32 %2533, i32* @P3_is_marked, align 4, !tbaa !5 + br label %2534 + +2534: ; preds = %2513, %2517, %2509, %2505, %2502, %2499 + %2535 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2536 = icmp sgt i32 %2535, 4 + br i1 %2536, label %2537, label %2570 + +2537: ; preds = %2534 + %2538 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2539 = icmp slt i32 %2538, 4 + br i1 %2539, label %2540, label %2570 + +2540: ; preds = %2537 + %2541 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2542 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2543 = icmp eq i64 %2541, %2542 + br i1 %2543, label %2544, label %2570 + +2544: ; preds = %2540 + %2545 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2546 = load volatile i64, i64* 
getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2547 = icmp eq i64 %2545, %2546 + br i1 %2547, label %2548, label %2570 + +2548: ; preds = %2544 + %2549 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %2550 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2551 = icmp sgt i64 %2550, %2549 + br i1 %2551, label %2552, label %2570 + +2552: ; preds = %2548 + %2553 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + store volatile i64 %2553, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2554 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2555 = add nsw i32 %2554, -4 + store volatile i32 %2555, i32* @P2_is_marked, align 4, !tbaa !5 + %2556 = add nsw i64 %2550, %2549 + %2557 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2558 = sext i32 %2557 to i64 + %2559 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2558 + store volatile i64 %2549, i64* %2559, align 8, !tbaa !9 + %2560 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2561 = add nsw i32 %2560, 1 + %2562 = sext i32 %2561 to i64 + %2563 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2562 + store volatile i64 %2550, i64* %2563, align 8, !tbaa !9 + %2564 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2565 = add nsw i32 %2564, 2 + %2566 = sext i32 %2565 to i64 + %2567 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2566 + store volatile i64 %2556, i64* %2567, align 8, !tbaa !9 + %2568 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2569 = add nsw i32 %2568, 3 + store volatile i32 %2569, i32* @P3_is_marked, align 4, !tbaa !5 + br label %2570 + +2570: ; preds = %2548, %2552, %2544, %2540, %2537, %2534 + %2571 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2572 = icmp sgt i32 %2571, 4 + br i1 %2572, label %2573, label %2605 + +2573: ; preds = %2570 + %2574 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2575 = icmp slt i32 %2574, 4 + br i1 %2575, label %2576, label %2605 + +2576: ; preds = %2573 + %2577 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2578 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2579 = icmp eq i64 %2577, %2578 + br i1 %2579, label %2580, label %2605 + +2580: ; preds = %2576 + %2581 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2582 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2583 = icmp eq i64 %2581, %2582 + br i1 %2583, label %2584, label %2605 + +2584: ; preds = %2580 + %2585 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %2586 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2587 = icmp sgt i64 %2586, %2585 + br i1 %2587, label %2588, label %2605 + +2588: ; preds = %2584 + %2589 = 
load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2590 = add nsw i32 %2589, -4 + store volatile i32 %2590, i32* @P2_is_marked, align 4, !tbaa !5 + %2591 = add nsw i64 %2586, %2585 + %2592 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2593 = sext i32 %2592 to i64 + %2594 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2593 + store volatile i64 %2585, i64* %2594, align 8, !tbaa !9 + %2595 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2596 = add nsw i32 %2595, 1 + %2597 = sext i32 %2596 to i64 + %2598 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2597 + store volatile i64 %2586, i64* %2598, align 8, !tbaa !9 + %2599 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2600 = add nsw i32 %2599, 2 + %2601 = sext i32 %2600 to i64 + %2602 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2601 + store volatile i64 %2591, i64* %2602, align 8, !tbaa !9 + %2603 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2604 = add nsw i32 %2603, 3 + store volatile i32 %2604, i32* @P3_is_marked, align 4, !tbaa !5 + br label %2605 + +2605: ; preds = %2584, %2588, %2580, %2576, %2573, %2570 + %2606 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2607 = icmp sgt i32 %2606, 4 + br i1 %2607, label %2608, label %2641 + +2608: ; preds = %2605 + %2609 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2610 = icmp slt i32 %2609, 4 + br i1 %2610, label %2611, label %2641 + +2611: ; preds = %2608 + %2612 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2613 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2614 = icmp eq i64 %2612, %2613 + br i1 %2614, label %2615, label %2641 + +2615: ; preds = %2611 + %2616 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2617 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2618 = icmp eq i64 %2616, %2617 + br i1 %2618, label %2619, label %2641 + +2619: ; preds = %2615 + %2620 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %2621 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2622 = icmp sgt i64 %2621, %2620 + br i1 %2622, label %2623, label %2641 + +2623: ; preds = %2619 + %2624 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + store volatile i64 %2624, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2625 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2626 = add nsw i32 %2625, -4 + store volatile i32 %2626, i32* @P2_is_marked, align 4, !tbaa !5 + %2627 = add nsw i64 %2621, %2620 + %2628 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2629 = sext i32 %2628 to i64 + %2630 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2629 + store volatile i64 %2620, i64* %2630, align 8, !tbaa !9 + %2631 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2632 = add nsw i32 %2631, 1 + %2633 = sext i32 %2632 to i64 + %2634 = 
getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2633 + store volatile i64 %2621, i64* %2634, align 8, !tbaa !9 + %2635 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2636 = add nsw i32 %2635, 2 + %2637 = sext i32 %2636 to i64 + %2638 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2637 + store volatile i64 %2627, i64* %2638, align 8, !tbaa !9 + %2639 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2640 = add nsw i32 %2639, 3 + store volatile i32 %2640, i32* @P3_is_marked, align 4, !tbaa !5 + br label %2641 + +2641: ; preds = %2619, %2623, %2615, %2611, %2608, %2605 + %2642 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2643 = icmp sgt i32 %2642, 4 + br i1 %2643, label %2644, label %2676 + +2644: ; preds = %2641 + %2645 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2646 = icmp slt i32 %2645, 4 + br i1 %2646, label %2647, label %2676 + +2647: ; preds = %2644 + %2648 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2649 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2650 = icmp eq i64 %2648, %2649 + br i1 %2650, label %2651, label %2676 + +2651: ; preds = %2647 + %2652 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2653 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2654 = icmp eq i64 %2652, %2653 + br i1 %2654, label %2655, label %2676 + +2655: ; preds = %2651 + %2656 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %2657 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2658 = icmp sgt i64 %2657, %2656 + br i1 %2658, label %2659, label %2676 + +2659: ; preds = %2655 + %2660 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2661 = add nsw i32 %2660, -4 + store volatile i32 %2661, i32* @P2_is_marked, align 4, !tbaa !5 + %2662 = add nsw i64 %2657, %2656 + %2663 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2664 = sext i32 %2663 to i64 + %2665 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2664 + store volatile i64 %2656, i64* %2665, align 8, !tbaa !9 + %2666 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2667 = add nsw i32 %2666, 1 + %2668 = sext i32 %2667 to i64 + %2669 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2668 + store volatile i64 %2657, i64* %2669, align 8, !tbaa !9 + %2670 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2671 = add nsw i32 %2670, 2 + %2672 = sext i32 %2671 to i64 + %2673 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2672 + store volatile i64 %2662, i64* %2673, align 8, !tbaa !9 + %2674 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2675 = add nsw i32 %2674, 3 + store volatile i32 %2675, i32* @P3_is_marked, align 4, !tbaa !5 + br label %2676 + +2676: ; preds = %2655, %2659, %2651, %2647, %2644, %2641 + %2677 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2678 = icmp sgt i32 %2677, 4 + br i1 %2678, label %2679, label %2712 + +2679: ; preds = %2676 + %2680 = load volatile i32, 
i32* @P3_is_marked, align 4, !tbaa !5 + %2681 = icmp slt i32 %2680, 4 + br i1 %2681, label %2682, label %2712 + +2682: ; preds = %2679 + %2683 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2684 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2685 = icmp eq i64 %2683, %2684 + br i1 %2685, label %2686, label %2712 + +2686: ; preds = %2682 + %2687 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2688 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2689 = icmp eq i64 %2687, %2688 + br i1 %2689, label %2690, label %2712 + +2690: ; preds = %2686 + %2691 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %2692 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2693 = icmp sgt i64 %2692, %2691 + br i1 %2693, label %2694, label %2712 + +2694: ; preds = %2690 + %2695 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + store volatile i64 %2695, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2696 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2697 = add nsw i32 %2696, -4 + store volatile i32 %2697, i32* @P2_is_marked, align 4, !tbaa !5 + %2698 = add nsw i64 %2692, %2691 + %2699 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2700 = sext i32 %2699 to i64 + %2701 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2700 + store volatile i64 %2691, i64* %2701, align 8, !tbaa !9 + %2702 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2703 = add nsw i32 %2702, 1 + %2704 = sext i32 %2703 to i64 + %2705 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2704 + store volatile i64 %2692, i64* %2705, align 8, !tbaa !9 + %2706 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2707 = add nsw i32 %2706, 2 + %2708 = sext i32 %2707 to i64 + %2709 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2708 + store volatile i64 %2698, i64* %2709, align 8, !tbaa !9 + %2710 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2711 = add nsw i32 %2710, 3 + store volatile i32 %2711, i32* @P3_is_marked, align 4, !tbaa !5 + br label %2712 + +2712: ; preds = %2690, %2694, %2686, %2682, %2679, %2676 + %2713 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2714 = icmp sgt i32 %2713, 4 + br i1 %2714, label %2715, label %2747 + +2715: ; preds = %2712 + %2716 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2717 = icmp slt i32 %2716, 4 + br i1 %2717, label %2718, label %2747 + +2718: ; preds = %2715 + %2719 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2720 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2721 = icmp eq i64 %2719, %2720 + br i1 %2721, label %2722, label %2747 + +2722: ; preds = %2718 + %2723 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* 
@P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2724 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2725 = icmp eq i64 %2723, %2724 + br i1 %2725, label %2726, label %2747 + +2726: ; preds = %2722 + %2727 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %2728 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2729 = icmp sgt i64 %2728, %2727 + br i1 %2729, label %2730, label %2747 + +2730: ; preds = %2726 + %2731 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2732 = add nsw i32 %2731, -4 + store volatile i32 %2732, i32* @P2_is_marked, align 4, !tbaa !5 + %2733 = add nsw i64 %2728, %2727 + %2734 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2735 = sext i32 %2734 to i64 + %2736 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2735 + store volatile i64 %2727, i64* %2736, align 8, !tbaa !9 + %2737 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2738 = add nsw i32 %2737, 1 + %2739 = sext i32 %2738 to i64 + %2740 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2739 + store volatile i64 %2728, i64* %2740, align 8, !tbaa !9 + %2741 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2742 = add nsw i32 %2741, 2 + %2743 = sext i32 %2742 to i64 + %2744 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2743 + store volatile i64 %2733, i64* %2744, align 8, !tbaa !9 + %2745 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2746 = add nsw i32 %2745, 3 + store volatile i32 %2746, i32* @P3_is_marked, align 4, !tbaa !5 + br label %2747 + +2747: ; preds = %2726, %2730, %2722, %2718, %2715, %2712 + %2748 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2749 = icmp sgt i32 %2748, 4 + br i1 %2749, label %2750, label %2783 + +2750: ; preds = %2747 + %2751 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2752 = icmp slt i32 %2751, 4 + br i1 %2752, label %2753, label %2783 + +2753: ; preds = %2750 + %2754 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2755 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2756 = icmp eq i64 %2754, %2755 + br i1 %2756, label %2757, label %2783 + +2757: ; preds = %2753 + %2758 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2759 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2760 = icmp eq i64 %2758, %2759 + br i1 %2760, label %2761, label %2783 + +2761: ; preds = %2757 + %2762 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %2763 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2764 = icmp sgt i64 %2763, %2762 + br i1 %2764, label %2765, label %2783 + +2765: ; preds = %2761 + %2766 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + store volatile i64 %2766, i64* getelementptr inbounds 
([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2767 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2768 = add nsw i32 %2767, -4 + store volatile i32 %2768, i32* @P2_is_marked, align 4, !tbaa !5 + %2769 = add nsw i64 %2763, %2762 + %2770 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2771 = sext i32 %2770 to i64 + %2772 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2771 + store volatile i64 %2762, i64* %2772, align 8, !tbaa !9 + %2773 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2774 = add nsw i32 %2773, 1 + %2775 = sext i32 %2774 to i64 + %2776 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2775 + store volatile i64 %2763, i64* %2776, align 8, !tbaa !9 + %2777 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2778 = add nsw i32 %2777, 2 + %2779 = sext i32 %2778 to i64 + %2780 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2779 + store volatile i64 %2769, i64* %2780, align 8, !tbaa !9 + %2781 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2782 = add nsw i32 %2781, 3 + store volatile i32 %2782, i32* @P3_is_marked, align 4, !tbaa !5 + br label %2783 + +2783: ; preds = %2761, %2765, %2757, %2753, %2750, %2747 + %2784 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2785 = icmp sgt i32 %2784, 4 + br i1 %2785, label %2786, label %2819 + +2786: ; preds = %2783 + %2787 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2788 = icmp slt i32 %2787, 4 + br i1 %2788, label %2789, label %2819 + +2789: ; preds = %2786 + %2790 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2791 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2792 = icmp eq i64 %2790, %2791 + br i1 %2792, label %2793, label %2819 + +2793: ; preds = %2789 + %2794 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2795 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2796 = icmp eq i64 %2794, %2795 + br i1 %2796, label %2797, label %2819 + +2797: ; preds = %2793 + %2798 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %2799 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2800 = icmp sgt i64 %2799, %2798 + br i1 %2800, label %2801, label %2819 + +2801: ; preds = %2797 + %2802 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + store volatile i64 %2802, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2803 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2804 = add nsw i32 %2803, -4 + store volatile i32 %2804, i32* @P2_is_marked, align 4, !tbaa !5 + %2805 = add nsw i64 %2799, %2798 + %2806 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2807 = sext i32 %2806 to i64 + %2808 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2807 + store volatile i64 %2798, i64* %2808, align 8, !tbaa !9 + %2809 = load volatile i32, i32* @P3_is_marked, align 
4, !tbaa !5 + %2810 = add nsw i32 %2809, 1 + %2811 = sext i32 %2810 to i64 + %2812 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2811 + store volatile i64 %2799, i64* %2812, align 8, !tbaa !9 + %2813 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2814 = add nsw i32 %2813, 2 + %2815 = sext i32 %2814 to i64 + %2816 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2815 + store volatile i64 %2805, i64* %2816, align 8, !tbaa !9 + %2817 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2818 = add nsw i32 %2817, 3 + store volatile i32 %2818, i32* @P3_is_marked, align 4, !tbaa !5 + br label %2819 + +2819: ; preds = %2797, %2801, %2793, %2789, %2786, %2783 + %2820 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2821 = icmp sgt i32 %2820, 4 + br i1 %2821, label %2822, label %2855 + +2822: ; preds = %2819 + %2823 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2824 = icmp slt i32 %2823, 4 + br i1 %2824, label %2825, label %2855 + +2825: ; preds = %2822 + %2826 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2827 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2828 = icmp eq i64 %2826, %2827 + br i1 %2828, label %2829, label %2855 + +2829: ; preds = %2825 + %2830 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2831 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2832 = icmp eq i64 %2830, %2831 + br i1 %2832, label %2833, label %2855 + +2833: ; preds = %2829 + %2834 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %2835 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2836 = icmp sgt i64 %2835, %2834 + br i1 %2836, label %2837, label %2855 + +2837: ; preds = %2833 + %2838 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + store volatile i64 %2838, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2839 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2840 = add nsw i32 %2839, -4 + store volatile i32 %2840, i32* @P2_is_marked, align 4, !tbaa !5 + %2841 = add nsw i64 %2835, %2834 + %2842 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2843 = sext i32 %2842 to i64 + %2844 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2843 + store volatile i64 %2834, i64* %2844, align 8, !tbaa !9 + %2845 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2846 = add nsw i32 %2845, 1 + %2847 = sext i32 %2846 to i64 + %2848 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2847 + store volatile i64 %2835, i64* %2848, align 8, !tbaa !9 + %2849 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2850 = add nsw i32 %2849, 2 + %2851 = sext i32 %2850 to i64 + %2852 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2851 + store volatile i64 %2841, i64* %2852, align 8, !tbaa !9 + %2853 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2854 
= add nsw i32 %2853, 3 + store volatile i32 %2854, i32* @P3_is_marked, align 4, !tbaa !5 + br label %2855 + +2855: ; preds = %2833, %2837, %2829, %2825, %2822, %2819 + %2856 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2857 = icmp sgt i32 %2856, 4 + br i1 %2857, label %2858, label %2890 + +2858: ; preds = %2855 + %2859 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2860 = icmp slt i32 %2859, 4 + br i1 %2860, label %2861, label %2890 + +2861: ; preds = %2858 + %2862 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2863 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2864 = icmp eq i64 %2862, %2863 + br i1 %2864, label %2865, label %2890 + +2865: ; preds = %2861 + %2866 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2867 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2868 = icmp eq i64 %2866, %2867 + br i1 %2868, label %2869, label %2890 + +2869: ; preds = %2865 + %2870 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %2871 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2872 = icmp sgt i64 %2871, %2870 + br i1 %2872, label %2873, label %2890 + +2873: ; preds = %2869 + %2874 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2875 = add nsw i32 %2874, -4 + store volatile i32 %2875, i32* @P2_is_marked, align 4, !tbaa !5 + %2876 = add nsw i64 %2871, %2870 + %2877 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2878 = sext i32 %2877 to i64 + %2879 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2878 + store volatile i64 %2870, i64* %2879, align 8, !tbaa !9 + %2880 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2881 = add nsw i32 %2880, 1 + %2882 = sext i32 %2881 to i64 + %2883 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2882 + store volatile i64 %2871, i64* %2883, align 8, !tbaa !9 + %2884 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2885 = add nsw i32 %2884, 2 + %2886 = sext i32 %2885 to i64 + %2887 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2886 + store volatile i64 %2876, i64* %2887, align 8, !tbaa !9 + %2888 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2889 = add nsw i32 %2888, 3 + store volatile i32 %2889, i32* @P3_is_marked, align 4, !tbaa !5 + br label %2890 + +2890: ; preds = %2869, %2873, %2865, %2861, %2858, %2855 + %2891 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2892 = icmp sgt i32 %2891, 4 + br i1 %2892, label %2893, label %2926 + +2893: ; preds = %2890 + %2894 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2895 = icmp slt i32 %2894, 4 + br i1 %2895, label %2896, label %2926 + +2896: ; preds = %2893 + %2897 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2898 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2899 = icmp eq i64 %2897, %2898 + br i1 %2899, label %2900, label %2926 + +2900: ; 
preds = %2896 + %2901 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2902 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2903 = icmp eq i64 %2901, %2902 + br i1 %2903, label %2904, label %2926 + +2904: ; preds = %2900 + %2905 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %2906 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2907 = icmp sgt i64 %2906, %2905 + br i1 %2907, label %2908, label %2926 + +2908: ; preds = %2904 + %2909 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + store volatile i64 %2909, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2910 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2911 = add nsw i32 %2910, -4 + store volatile i32 %2911, i32* @P2_is_marked, align 4, !tbaa !5 + %2912 = add nsw i64 %2906, %2905 + %2913 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2914 = sext i32 %2913 to i64 + %2915 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2914 + store volatile i64 %2905, i64* %2915, align 8, !tbaa !9 + %2916 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2917 = add nsw i32 %2916, 1 + %2918 = sext i32 %2917 to i64 + %2919 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2918 + store volatile i64 %2906, i64* %2919, align 8, !tbaa !9 + %2920 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2921 = add nsw i32 %2920, 2 + %2922 = sext i32 %2921 to i64 + %2923 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2922 + store volatile i64 %2912, i64* %2923, align 8, !tbaa !9 + %2924 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2925 = add nsw i32 %2924, 3 + store volatile i32 %2925, i32* @P3_is_marked, align 4, !tbaa !5 + br label %2926 + +2926: ; preds = %2904, %2908, %2900, %2896, %2893, %2890 + %2927 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2928 = icmp sgt i32 %2927, 4 + br i1 %2928, label %2929, label %2961 + +2929: ; preds = %2926 + %2930 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2931 = icmp slt i32 %2930, 4 + br i1 %2931, label %2932, label %2961 + +2932: ; preds = %2929 + %2933 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2934 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2935 = icmp eq i64 %2933, %2934 + br i1 %2935, label %2936, label %2961 + +2936: ; preds = %2932 + %2937 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2938 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2939 = icmp eq i64 %2937, %2938 + br i1 %2939, label %2940, label %2961 + +2940: ; preds = %2936 + %2941 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %2942 = load volatile i64, i64* getelementptr inbounds ([5 x 
i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2943 = icmp sgt i64 %2942, %2941 + br i1 %2943, label %2944, label %2961 + +2944: ; preds = %2940 + %2945 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2946 = add nsw i32 %2945, -4 + store volatile i32 %2946, i32* @P2_is_marked, align 4, !tbaa !5 + %2947 = add nsw i64 %2942, %2941 + %2948 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2949 = sext i32 %2948 to i64 + %2950 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2949 + store volatile i64 %2941, i64* %2950, align 8, !tbaa !9 + %2951 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2952 = add nsw i32 %2951, 1 + %2953 = sext i32 %2952 to i64 + %2954 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2953 + store volatile i64 %2942, i64* %2954, align 8, !tbaa !9 + %2955 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2956 = add nsw i32 %2955, 2 + %2957 = sext i32 %2956 to i64 + %2958 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2957 + store volatile i64 %2947, i64* %2958, align 8, !tbaa !9 + %2959 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2960 = add nsw i32 %2959, 3 + store volatile i32 %2960, i32* @P3_is_marked, align 4, !tbaa !5 + br label %2961 + +2961: ; preds = %2940, %2944, %2936, %2932, %2929, %2926 + %2962 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2963 = icmp sgt i32 %2962, 4 + br i1 %2963, label %2964, label %2997 + +2964: ; preds = %2961 + %2965 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2966 = icmp slt i32 %2965, 4 + br i1 %2966, label %2967, label %2997 + +2967: ; preds = %2964 + %2968 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2969 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %2970 = icmp eq i64 %2968, %2969 + br i1 %2970, label %2971, label %2997 + +2971: ; preds = %2967 + %2972 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2973 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %2974 = icmp eq i64 %2972, %2973 + br i1 %2974, label %2975, label %2997 + +2975: ; preds = %2971 + %2976 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %2977 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2978 = icmp sgt i64 %2977, %2976 + br i1 %2978, label %2979, label %2997 + +2979: ; preds = %2975 + %2980 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + store volatile i64 %2980, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %2981 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2982 = add nsw i32 %2981, -4 + store volatile i32 %2982, i32* @P2_is_marked, align 4, !tbaa !5 + %2983 = add nsw i64 %2977, %2976 + %2984 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2985 = sext i32 %2984 to i64 + %2986 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2985 + store volatile 
i64 %2976, i64* %2986, align 8, !tbaa !9 + %2987 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2988 = add nsw i32 %2987, 1 + %2989 = sext i32 %2988 to i64 + %2990 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2989 + store volatile i64 %2977, i64* %2990, align 8, !tbaa !9 + %2991 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2992 = add nsw i32 %2991, 2 + %2993 = sext i32 %2992 to i64 + %2994 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %2993 + store volatile i64 %2983, i64* %2994, align 8, !tbaa !9 + %2995 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %2996 = add nsw i32 %2995, 3 + store volatile i32 %2996, i32* @P3_is_marked, align 4, !tbaa !5 + br label %2997 + +2997: ; preds = %2975, %2979, %2971, %2967, %2964, %2961 + %2998 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %2999 = icmp sgt i32 %2998, 4 + br i1 %2999, label %3000, label %3033 + +3000: ; preds = %2997 + %3001 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3002 = icmp slt i32 %3001, 4 + br i1 %3002, label %3003, label %3033 + +3003: ; preds = %3000 + %3004 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3005 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %3006 = icmp eq i64 %3004, %3005 + br i1 %3006, label %3007, label %3033 + +3007: ; preds = %3003 + %3008 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3009 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3010 = icmp eq i64 %3008, %3009 + br i1 %3010, label %3011, label %3033 + +3011: ; preds = %3007 + %3012 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %3013 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3014 = icmp sgt i64 %3013, %3012 + br i1 %3014, label %3015, label %3033 + +3015: ; preds = %3011 + %3016 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + store volatile i64 %3016, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3017 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3018 = add nsw i32 %3017, -4 + store volatile i32 %3018, i32* @P2_is_marked, align 4, !tbaa !5 + %3019 = add nsw i64 %3013, %3012 + %3020 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3021 = sext i32 %3020 to i64 + %3022 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3021 + store volatile i64 %3012, i64* %3022, align 8, !tbaa !9 + %3023 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3024 = add nsw i32 %3023, 1 + %3025 = sext i32 %3024 to i64 + %3026 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3025 + store volatile i64 %3013, i64* %3026, align 8, !tbaa !9 + %3027 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3028 = add nsw i32 %3027, 2 + %3029 = sext i32 %3028 to i64 + %3030 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3029 + store volatile i64 %3019, i64* 
%3030, align 8, !tbaa !9 + %3031 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3032 = add nsw i32 %3031, 3 + store volatile i32 %3032, i32* @P3_is_marked, align 4, !tbaa !5 + br label %3033 + +3033: ; preds = %3011, %3015, %3007, %3003, %3000, %2997 + %3034 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3035 = icmp sgt i32 %3034, 4 + br i1 %3035, label %3036, label %3069 + +3036: ; preds = %3033 + %3037 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3038 = icmp slt i32 %3037, 4 + br i1 %3038, label %3039, label %3069 + +3039: ; preds = %3036 + %3040 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3041 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3042 = icmp eq i64 %3040, %3041 + br i1 %3042, label %3043, label %3069 + +3043: ; preds = %3039 + %3044 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3045 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3046 = icmp eq i64 %3044, %3045 + br i1 %3046, label %3047, label %3069 + +3047: ; preds = %3043 + %3048 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %3049 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3050 = icmp sgt i64 %3049, %3048 + br i1 %3050, label %3051, label %3069 + +3051: ; preds = %3047 + %3052 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + store volatile i64 %3052, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3053 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3054 = add nsw i32 %3053, -4 + store volatile i32 %3054, i32* @P2_is_marked, align 4, !tbaa !5 + %3055 = add nsw i64 %3049, %3048 + %3056 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3057 = sext i32 %3056 to i64 + %3058 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3057 + store volatile i64 %3048, i64* %3058, align 8, !tbaa !9 + %3059 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3060 = add nsw i32 %3059, 1 + %3061 = sext i32 %3060 to i64 + %3062 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3061 + store volatile i64 %3049, i64* %3062, align 8, !tbaa !9 + %3063 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3064 = add nsw i32 %3063, 2 + %3065 = sext i32 %3064 to i64 + %3066 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3065 + store volatile i64 %3055, i64* %3066, align 8, !tbaa !9 + %3067 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3068 = add nsw i32 %3067, 3 + store volatile i32 %3068, i32* @P3_is_marked, align 4, !tbaa !5 + br label %3069 + +3069: ; preds = %3047, %3051, %3043, %3039, %3036, %3033 + %3070 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3071 = icmp sgt i32 %3070, 4 + br i1 %3071, label %3072, label %3105 + +3072: ; preds = %3069 + %3073 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3074 = icmp slt i32 %3073, 4 + br i1 %3074, label %3075, label %3105 + +3075: ; 
preds = %3072 + %3076 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3077 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3078 = icmp eq i64 %3076, %3077 + br i1 %3078, label %3079, label %3105 + +3079: ; preds = %3075 + %3080 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3081 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %3082 = icmp eq i64 %3080, %3081 + br i1 %3082, label %3083, label %3105 + +3083: ; preds = %3079 + %3084 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %3085 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3086 = icmp sgt i64 %3085, %3084 + br i1 %3086, label %3087, label %3105 + +3087: ; preds = %3083 + %3088 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + store volatile i64 %3088, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3089 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3090 = add nsw i32 %3089, -4 + store volatile i32 %3090, i32* @P2_is_marked, align 4, !tbaa !5 + %3091 = add nsw i64 %3085, %3084 + %3092 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3093 = sext i32 %3092 to i64 + %3094 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3093 + store volatile i64 %3084, i64* %3094, align 8, !tbaa !9 + %3095 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3096 = add nsw i32 %3095, 1 + %3097 = sext i32 %3096 to i64 + %3098 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3097 + store volatile i64 %3085, i64* %3098, align 8, !tbaa !9 + %3099 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3100 = add nsw i32 %3099, 2 + %3101 = sext i32 %3100 to i64 + %3102 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3101 + store volatile i64 %3091, i64* %3102, align 8, !tbaa !9 + %3103 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3104 = add nsw i32 %3103, 3 + store volatile i32 %3104, i32* @P3_is_marked, align 4, !tbaa !5 + br label %3105 + +3105: ; preds = %3083, %3087, %3079, %3075, %3072, %3069 + %3106 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3107 = icmp sgt i32 %3106, 4 + br i1 %3107, label %3108, label %3141 + +3108: ; preds = %3105 + %3109 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3110 = icmp slt i32 %3109, 4 + br i1 %3110, label %3111, label %3141 + +3111: ; preds = %3108 + %3112 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3113 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3114 = icmp eq i64 %3112, %3113 + br i1 %3114, label %3115, label %3141 + +3115: ; preds = %3111 + %3116 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3117 = load volatile i64, i64* getelementptr inbounds ([5 x 
i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3118 = icmp eq i64 %3116, %3117 + br i1 %3118, label %3119, label %3141 + +3119: ; preds = %3115 + %3120 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %3121 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3122 = icmp sgt i64 %3121, %3120 + br i1 %3122, label %3123, label %3141 + +3123: ; preds = %3119 + %3124 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + store volatile i64 %3124, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3125 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3126 = add nsw i32 %3125, -4 + store volatile i32 %3126, i32* @P2_is_marked, align 4, !tbaa !5 + %3127 = add nsw i64 %3121, %3120 + %3128 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3129 = sext i32 %3128 to i64 + %3130 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3129 + store volatile i64 %3120, i64* %3130, align 8, !tbaa !9 + %3131 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3132 = add nsw i32 %3131, 1 + %3133 = sext i32 %3132 to i64 + %3134 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3133 + store volatile i64 %3121, i64* %3134, align 8, !tbaa !9 + %3135 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3136 = add nsw i32 %3135, 2 + %3137 = sext i32 %3136 to i64 + %3138 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3137 + store volatile i64 %3127, i64* %3138, align 8, !tbaa !9 + %3139 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3140 = add nsw i32 %3139, 3 + store volatile i32 %3140, i32* @P3_is_marked, align 4, !tbaa !5 + br label %3141 + +3141: ; preds = %3119, %3123, %3115, %3111, %3108, %3105 + %3142 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3143 = icmp sgt i32 %3142, 4 + br i1 %3143, label %3144, label %3176 + +3144: ; preds = %3141 + %3145 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3146 = icmp slt i32 %3145, 4 + br i1 %3146, label %3147, label %3176 + +3147: ; preds = %3144 + %3148 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3149 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %3150 = icmp eq i64 %3148, %3149 + br i1 %3150, label %3151, label %3176 + +3151: ; preds = %3147 + %3152 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3153 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3154 = icmp eq i64 %3152, %3153 + br i1 %3154, label %3155, label %3176 + +3155: ; preds = %3151 + %3156 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %3157 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3158 = icmp sgt i64 %3157, %3156 + br i1 %3158, label %3159, label %3176 + +3159: ; preds = %3155 + %3160 = load volatile i32, i32* 
@P2_is_marked, align 4, !tbaa !5 + %3161 = add nsw i32 %3160, -4 + store volatile i32 %3161, i32* @P2_is_marked, align 4, !tbaa !5 + %3162 = add nsw i64 %3157, %3156 + %3163 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3164 = sext i32 %3163 to i64 + %3165 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3164 + store volatile i64 %3156, i64* %3165, align 8, !tbaa !9 + %3166 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3167 = add nsw i32 %3166, 1 + %3168 = sext i32 %3167 to i64 + %3169 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3168 + store volatile i64 %3157, i64* %3169, align 8, !tbaa !9 + %3170 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3171 = add nsw i32 %3170, 2 + %3172 = sext i32 %3171 to i64 + %3173 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3172 + store volatile i64 %3162, i64* %3173, align 8, !tbaa !9 + %3174 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3175 = add nsw i32 %3174, 3 + store volatile i32 %3175, i32* @P3_is_marked, align 4, !tbaa !5 + br label %3176 + +3176: ; preds = %3155, %3159, %3151, %3147, %3144, %3141 + %3177 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3178 = icmp sgt i32 %3177, 4 + br i1 %3178, label %3179, label %3212 + +3179: ; preds = %3176 + %3180 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3181 = icmp slt i32 %3180, 4 + br i1 %3181, label %3182, label %3212 + +3182: ; preds = %3179 + %3183 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3184 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3185 = icmp eq i64 %3183, %3184 + br i1 %3185, label %3186, label %3212 + +3186: ; preds = %3182 + %3187 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3188 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3189 = icmp eq i64 %3187, %3188 + br i1 %3189, label %3190, label %3212 + +3190: ; preds = %3186 + %3191 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %3192 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3193 = icmp sgt i64 %3192, %3191 + br i1 %3193, label %3194, label %3212 + +3194: ; preds = %3190 + %3195 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + store volatile i64 %3195, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3196 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3197 = add nsw i32 %3196, -4 + store volatile i32 %3197, i32* @P2_is_marked, align 4, !tbaa !5 + %3198 = add nsw i64 %3192, %3191 + %3199 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3200 = sext i32 %3199 to i64 + %3201 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3200 + store volatile i64 %3191, i64* %3201, align 8, !tbaa !9 + %3202 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3203 = add nsw i32 %3202, 1 + %3204 = sext i32 %3203 to i64 + %3205 = getelementptr inbounds [6 x 
i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3204 + store volatile i64 %3192, i64* %3205, align 8, !tbaa !9 + %3206 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3207 = add nsw i32 %3206, 2 + %3208 = sext i32 %3207 to i64 + %3209 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3208 + store volatile i64 %3198, i64* %3209, align 8, !tbaa !9 + %3210 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3211 = add nsw i32 %3210, 3 + store volatile i32 %3211, i32* @P3_is_marked, align 4, !tbaa !5 + br label %3212 + +3212: ; preds = %3190, %3194, %3186, %3182, %3179, %3176 + %3213 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3214 = icmp sgt i32 %3213, 4 + br i1 %3214, label %3215, label %3247 + +3215: ; preds = %3212 + %3216 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3217 = icmp slt i32 %3216, 4 + br i1 %3217, label %3218, label %3247 + +3218: ; preds = %3215 + %3219 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3220 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3221 = icmp eq i64 %3219, %3220 + br i1 %3221, label %3222, label %3247 + +3222: ; preds = %3218 + %3223 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3224 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %3225 = icmp eq i64 %3223, %3224 + br i1 %3225, label %3226, label %3247 + +3226: ; preds = %3222 + %3227 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %3228 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3229 = icmp sgt i64 %3228, %3227 + br i1 %3229, label %3230, label %3247 + +3230: ; preds = %3226 + %3231 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3232 = add nsw i32 %3231, -4 + store volatile i32 %3232, i32* @P2_is_marked, align 4, !tbaa !5 + %3233 = add nsw i64 %3228, %3227 + %3234 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3235 = sext i32 %3234 to i64 + %3236 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3235 + store volatile i64 %3227, i64* %3236, align 8, !tbaa !9 + %3237 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3238 = add nsw i32 %3237, 1 + %3239 = sext i32 %3238 to i64 + %3240 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3239 + store volatile i64 %3228, i64* %3240, align 8, !tbaa !9 + %3241 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3242 = add nsw i32 %3241, 2 + %3243 = sext i32 %3242 to i64 + %3244 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3243 + store volatile i64 %3233, i64* %3244, align 8, !tbaa !9 + %3245 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3246 = add nsw i32 %3245, 3 + store volatile i32 %3246, i32* @P3_is_marked, align 4, !tbaa !5 + br label %3247 + +3247: ; preds = %3226, %3230, %3222, %3218, %3215, %3212 + %3248 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3249 = icmp sgt i32 %3248, 4 + br i1 %3249, label %3250, label %3283 + +3250: ; preds = %3247 + %3251 = load volatile i32, i32* @P3_is_marked, align 
4, !tbaa !5 + %3252 = icmp slt i32 %3251, 4 + br i1 %3252, label %3253, label %3283 + +3253: ; preds = %3250 + %3254 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %3255 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3256 = icmp eq i64 %3254, %3255 + br i1 %3256, label %3257, label %3283 + +3257: ; preds = %3253 + %3258 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %3259 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3260 = icmp eq i64 %3258, %3259 + br i1 %3260, label %3261, label %3283 + +3261: ; preds = %3257 + %3262 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %3263 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %3264 = icmp sgt i64 %3263, %3262 + br i1 %3264, label %3265, label %3283 + +3265: ; preds = %3261 + %3266 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + store volatile i64 %3266, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3267 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3268 = add nsw i32 %3267, -4 + store volatile i32 %3268, i32* @P2_is_marked, align 4, !tbaa !5 + %3269 = add nsw i64 %3263, %3262 + %3270 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3271 = sext i32 %3270 to i64 + %3272 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3271 + store volatile i64 %3262, i64* %3272, align 8, !tbaa !9 + %3273 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3274 = add nsw i32 %3273, 1 + %3275 = sext i32 %3274 to i64 + %3276 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3275 + store volatile i64 %3263, i64* %3276, align 8, !tbaa !9 + %3277 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3278 = add nsw i32 %3277, 2 + %3279 = sext i32 %3278 to i64 + %3280 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3279 + store volatile i64 %3269, i64* %3280, align 8, !tbaa !9 + %3281 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3282 = add nsw i32 %3281, 3 + store volatile i32 %3282, i32* @P3_is_marked, align 4, !tbaa !5 + br label %3283 + +3283: ; preds = %3261, %3265, %3257, %3253, %3250, %3247 + %3284 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3285 = icmp sgt i32 %3284, 4 + br i1 %3285, label %3286, label %3318 + +3286: ; preds = %3283 + %3287 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3288 = icmp slt i32 %3287, 4 + br i1 %3288, label %3289, label %3318 + +3289: ; preds = %3286 + %3290 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %3291 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3292 = icmp eq i64 %3290, %3291 + br i1 %3292, label %3293, label %3318 + +3293: ; preds = %3289 + %3294 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, 
i64 0, i64 2), align 16, !tbaa !9 + %3295 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3296 = icmp eq i64 %3294, %3295 + br i1 %3296, label %3297, label %3318 + +3297: ; preds = %3293 + %3298 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %3299 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %3300 = icmp sgt i64 %3299, %3298 + br i1 %3300, label %3301, label %3318 + +3301: ; preds = %3297 + %3302 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3303 = add nsw i32 %3302, -4 + store volatile i32 %3303, i32* @P2_is_marked, align 4, !tbaa !5 + %3304 = add nsw i64 %3299, %3298 + %3305 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3306 = sext i32 %3305 to i64 + %3307 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3306 + store volatile i64 %3298, i64* %3307, align 8, !tbaa !9 + %3308 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3309 = add nsw i32 %3308, 1 + %3310 = sext i32 %3309 to i64 + %3311 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3310 + store volatile i64 %3299, i64* %3311, align 8, !tbaa !9 + %3312 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3313 = add nsw i32 %3312, 2 + %3314 = sext i32 %3313 to i64 + %3315 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3314 + store volatile i64 %3304, i64* %3315, align 8, !tbaa !9 + %3316 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3317 = add nsw i32 %3316, 3 + store volatile i32 %3317, i32* @P3_is_marked, align 4, !tbaa !5 + br label %3318 + +3318: ; preds = %3297, %3301, %3293, %3289, %3286, %3283 + %3319 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3320 = icmp sgt i32 %3319, 4 + br i1 %3320, label %3321, label %3354 + +3321: ; preds = %3318 + %3322 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3323 = icmp slt i32 %3322, 4 + br i1 %3323, label %3324, label %3354 + +3324: ; preds = %3321 + %3325 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %3326 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3327 = icmp eq i64 %3325, %3326 + br i1 %3327, label %3328, label %3354 + +3328: ; preds = %3324 + %3329 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %3330 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3331 = icmp eq i64 %3329, %3330 + br i1 %3331, label %3332, label %3354 + +3332: ; preds = %3328 + %3333 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %3334 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %3335 = icmp sgt i64 %3334, %3333 + br i1 %3335, label %3336, label %3354 + +3336: ; preds = %3332 + %3337 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + store volatile i64 %3337, i64* getelementptr inbounds ([5 x i64], [5 x 
i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3338 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3339 = add nsw i32 %3338, -4 + store volatile i32 %3339, i32* @P2_is_marked, align 4, !tbaa !5 + %3340 = add nsw i64 %3334, %3333 + %3341 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3342 = sext i32 %3341 to i64 + %3343 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3342 + store volatile i64 %3333, i64* %3343, align 8, !tbaa !9 + %3344 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3345 = add nsw i32 %3344, 1 + %3346 = sext i32 %3345 to i64 + %3347 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3346 + store volatile i64 %3334, i64* %3347, align 8, !tbaa !9 + %3348 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3349 = add nsw i32 %3348, 2 + %3350 = sext i32 %3349 to i64 + %3351 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3350 + store volatile i64 %3340, i64* %3351, align 8, !tbaa !9 + %3352 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3353 = add nsw i32 %3352, 3 + store volatile i32 %3353, i32* @P3_is_marked, align 4, !tbaa !5 + br label %3354 + +3354: ; preds = %3332, %3336, %3328, %3324, %3321, %3318 + %3355 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3356 = icmp sgt i32 %3355, 4 + br i1 %3356, label %3357, label %3389 + +3357: ; preds = %3354 + %3358 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3359 = icmp slt i32 %3358, 4 + br i1 %3359, label %3360, label %3389 + +3360: ; preds = %3357 + %3361 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %3362 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3363 = icmp eq i64 %3361, %3362 + br i1 %3363, label %3364, label %3389 + +3364: ; preds = %3360 + %3365 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %3366 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3367 = icmp eq i64 %3365, %3366 + br i1 %3367, label %3368, label %3389 + +3368: ; preds = %3364 + %3369 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %3370 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %3371 = icmp sgt i64 %3370, %3369 + br i1 %3371, label %3372, label %3389 + +3372: ; preds = %3368 + %3373 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3374 = add nsw i32 %3373, -4 + store volatile i32 %3374, i32* @P2_is_marked, align 4, !tbaa !5 + %3375 = add nsw i64 %3370, %3369 + %3376 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3377 = sext i32 %3376 to i64 + %3378 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3377 + store volatile i64 %3369, i64* %3378, align 8, !tbaa !9 + %3379 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3380 = add nsw i32 %3379, 1 + %3381 = sext i32 %3380 to i64 + %3382 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3381 + store volatile i64 %3370, i64* %3382, align 8, !tbaa !9 + %3383 = load volatile i32, i32* @P3_is_marked, align 4, 
!tbaa !5 + %3384 = add nsw i32 %3383, 2 + %3385 = sext i32 %3384 to i64 + %3386 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3385 + store volatile i64 %3375, i64* %3386, align 8, !tbaa !9 + %3387 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3388 = add nsw i32 %3387, 3 + store volatile i32 %3388, i32* @P3_is_marked, align 4, !tbaa !5 + br label %3389 + +3389: ; preds = %3368, %3372, %3364, %3360, %3357, %3354 + %3390 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3391 = icmp sgt i32 %3390, 4 + br i1 %3391, label %3392, label %3425 + +3392: ; preds = %3389 + %3393 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3394 = icmp slt i32 %3393, 4 + br i1 %3394, label %3395, label %3425 + +3395: ; preds = %3392 + %3396 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3397 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3398 = icmp eq i64 %3396, %3397 + br i1 %3398, label %3399, label %3425 + +3399: ; preds = %3395 + %3400 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3401 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3402 = icmp eq i64 %3400, %3401 + br i1 %3402, label %3403, label %3425 + +3403: ; preds = %3399 + %3404 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %3405 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3406 = icmp sgt i64 %3405, %3404 + br i1 %3406, label %3407, label %3425 + +3407: ; preds = %3403 + %3408 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + store volatile i64 %3408, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3409 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3410 = add nsw i32 %3409, -4 + store volatile i32 %3410, i32* @P2_is_marked, align 4, !tbaa !5 + %3411 = add nsw i64 %3405, %3404 + %3412 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3413 = sext i32 %3412 to i64 + %3414 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3413 + store volatile i64 %3404, i64* %3414, align 8, !tbaa !9 + %3415 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3416 = add nsw i32 %3415, 1 + %3417 = sext i32 %3416 to i64 + %3418 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3417 + store volatile i64 %3405, i64* %3418, align 8, !tbaa !9 + %3419 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3420 = add nsw i32 %3419, 2 + %3421 = sext i32 %3420 to i64 + %3422 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3421 + store volatile i64 %3411, i64* %3422, align 8, !tbaa !9 + %3423 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3424 = add nsw i32 %3423, 3 + store volatile i32 %3424, i32* @P3_is_marked, align 4, !tbaa !5 + br label %3425 + +3425: ; preds = %3403, %3407, %3399, %3395, %3392, %3389 + %3426 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3427 = icmp sgt i32 %3426, 4 + br i1 %3427, label 
%3428, label %3461 + +3428: ; preds = %3425 + %3429 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3430 = icmp slt i32 %3429, 4 + br i1 %3430, label %3431, label %3461 + +3431: ; preds = %3428 + %3432 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3433 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3434 = icmp eq i64 %3432, %3433 + br i1 %3434, label %3435, label %3461 + +3435: ; preds = %3431 + %3436 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3437 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %3438 = icmp eq i64 %3436, %3437 + br i1 %3438, label %3439, label %3461 + +3439: ; preds = %3435 + %3440 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %3441 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3442 = icmp sgt i64 %3441, %3440 + br i1 %3442, label %3443, label %3461 + +3443: ; preds = %3439 + %3444 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + store volatile i64 %3444, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3445 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3446 = add nsw i32 %3445, -4 + store volatile i32 %3446, i32* @P2_is_marked, align 4, !tbaa !5 + %3447 = add nsw i64 %3441, %3440 + %3448 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3449 = sext i32 %3448 to i64 + %3450 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3449 + store volatile i64 %3440, i64* %3450, align 8, !tbaa !9 + %3451 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3452 = add nsw i32 %3451, 1 + %3453 = sext i32 %3452 to i64 + %3454 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3453 + store volatile i64 %3441, i64* %3454, align 8, !tbaa !9 + %3455 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3456 = add nsw i32 %3455, 2 + %3457 = sext i32 %3456 to i64 + %3458 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3457 + store volatile i64 %3447, i64* %3458, align 8, !tbaa !9 + %3459 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3460 = add nsw i32 %3459, 3 + store volatile i32 %3460, i32* @P3_is_marked, align 4, !tbaa !5 + br label %3461 + +3461: ; preds = %3439, %3443, %3435, %3431, %3428, %3425 + %3462 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3463 = icmp sgt i32 %3462, 4 + br i1 %3463, label %3464, label %3497 + +3464: ; preds = %3461 + %3465 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3466 = icmp slt i32 %3465, 4 + br i1 %3466, label %3467, label %3497 + +3467: ; preds = %3464 + %3468 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3469 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3470 = icmp eq i64 %3468, %3469 + br i1 %3470, label %3471, label %3497 + +3471: ; preds = %3467 + 
%3472 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3473 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3474 = icmp eq i64 %3472, %3473 + br i1 %3474, label %3475, label %3497 + +3475: ; preds = %3471 + %3476 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %3477 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3478 = icmp sgt i64 %3477, %3476 + br i1 %3478, label %3479, label %3497 + +3479: ; preds = %3475 + %3480 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + store volatile i64 %3480, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3481 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3482 = add nsw i32 %3481, -4 + store volatile i32 %3482, i32* @P2_is_marked, align 4, !tbaa !5 + %3483 = add nsw i64 %3477, %3476 + %3484 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3485 = sext i32 %3484 to i64 + %3486 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3485 + store volatile i64 %3476, i64* %3486, align 8, !tbaa !9 + %3487 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3488 = add nsw i32 %3487, 1 + %3489 = sext i32 %3488 to i64 + %3490 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3489 + store volatile i64 %3477, i64* %3490, align 8, !tbaa !9 + %3491 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3492 = add nsw i32 %3491, 2 + %3493 = sext i32 %3492 to i64 + %3494 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3493 + store volatile i64 %3483, i64* %3494, align 8, !tbaa !9 + %3495 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3496 = add nsw i32 %3495, 3 + store volatile i32 %3496, i32* @P3_is_marked, align 4, !tbaa !5 + br label %3497 + +3497: ; preds = %3475, %3479, %3471, %3467, %3464, %3461 + %3498 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3499 = icmp sgt i32 %3498, 4 + br i1 %3499, label %3500, label %3532 + +3500: ; preds = %3497 + %3501 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3502 = icmp slt i32 %3501, 4 + br i1 %3502, label %3503, label %3532 + +3503: ; preds = %3500 + %3504 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3505 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3506 = icmp eq i64 %3504, %3505 + br i1 %3506, label %3507, label %3532 + +3507: ; preds = %3503 + %3508 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3509 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %3510 = icmp eq i64 %3508, %3509 + br i1 %3510, label %3511, label %3532 + +3511: ; preds = %3507 + %3512 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %3513 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* 
@P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3514 = icmp sgt i64 %3513, %3512 + br i1 %3514, label %3515, label %3532 + +3515: ; preds = %3511 + %3516 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3517 = add nsw i32 %3516, -4 + store volatile i32 %3517, i32* @P2_is_marked, align 4, !tbaa !5 + %3518 = add nsw i64 %3513, %3512 + %3519 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3520 = sext i32 %3519 to i64 + %3521 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3520 + store volatile i64 %3512, i64* %3521, align 8, !tbaa !9 + %3522 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3523 = add nsw i32 %3522, 1 + %3524 = sext i32 %3523 to i64 + %3525 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3524 + store volatile i64 %3513, i64* %3525, align 8, !tbaa !9 + %3526 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3527 = add nsw i32 %3526, 2 + %3528 = sext i32 %3527 to i64 + %3529 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3528 + store volatile i64 %3518, i64* %3529, align 8, !tbaa !9 + %3530 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3531 = add nsw i32 %3530, 3 + store volatile i32 %3531, i32* @P3_is_marked, align 4, !tbaa !5 + br label %3532 + +3532: ; preds = %3511, %3515, %3507, %3503, %3500, %3497 + %3533 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3534 = icmp sgt i32 %3533, 4 + br i1 %3534, label %3535, label %3568 + +3535: ; preds = %3532 + %3536 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3537 = icmp slt i32 %3536, 4 + br i1 %3537, label %3538, label %3568 + +3538: ; preds = %3535 + %3539 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3540 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %3541 = icmp eq i64 %3539, %3540 + br i1 %3541, label %3542, label %3568 + +3542: ; preds = %3538 + %3543 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3544 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3545 = icmp eq i64 %3543, %3544 + br i1 %3545, label %3546, label %3568 + +3546: ; preds = %3542 + %3547 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %3548 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3549 = icmp sgt i64 %3548, %3547 + br i1 %3549, label %3550, label %3568 + +3550: ; preds = %3546 + %3551 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + store volatile i64 %3551, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3552 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3553 = add nsw i32 %3552, -4 + store volatile i32 %3553, i32* @P2_is_marked, align 4, !tbaa !5 + %3554 = add nsw i64 %3548, %3547 + %3555 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3556 = sext i32 %3555 to i64 + %3557 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3556 + store volatile i64 %3547, i64* 
%3557, align 8, !tbaa !9 + %3558 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3559 = add nsw i32 %3558, 1 + %3560 = sext i32 %3559 to i64 + %3561 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3560 + store volatile i64 %3548, i64* %3561, align 8, !tbaa !9 + %3562 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3563 = add nsw i32 %3562, 2 + %3564 = sext i32 %3563 to i64 + %3565 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3564 + store volatile i64 %3554, i64* %3565, align 8, !tbaa !9 + %3566 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3567 = add nsw i32 %3566, 3 + store volatile i32 %3567, i32* @P3_is_marked, align 4, !tbaa !5 + br label %3568 + +3568: ; preds = %3546, %3550, %3542, %3538, %3535, %3532 + %3569 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3570 = icmp sgt i32 %3569, 4 + br i1 %3570, label %3571, label %3603 + +3571: ; preds = %3568 + %3572 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3573 = icmp slt i32 %3572, 4 + br i1 %3573, label %3574, label %3603 + +3574: ; preds = %3571 + %3575 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3576 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %3577 = icmp eq i64 %3575, %3576 + br i1 %3577, label %3578, label %3603 + +3578: ; preds = %3574 + %3579 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3580 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3581 = icmp eq i64 %3579, %3580 + br i1 %3581, label %3582, label %3603 + +3582: ; preds = %3578 + %3583 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %3584 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3585 = icmp sgt i64 %3584, %3583 + br i1 %3585, label %3586, label %3603 + +3586: ; preds = %3582 + %3587 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3588 = add nsw i32 %3587, -4 + store volatile i32 %3588, i32* @P2_is_marked, align 4, !tbaa !5 + %3589 = add nsw i64 %3584, %3583 + %3590 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3591 = sext i32 %3590 to i64 + %3592 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3591 + store volatile i64 %3583, i64* %3592, align 8, !tbaa !9 + %3593 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3594 = add nsw i32 %3593, 1 + %3595 = sext i32 %3594 to i64 + %3596 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3595 + store volatile i64 %3584, i64* %3596, align 8, !tbaa !9 + %3597 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3598 = add nsw i32 %3597, 2 + %3599 = sext i32 %3598 to i64 + %3600 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3599 + store volatile i64 %3589, i64* %3600, align 8, !tbaa !9 + %3601 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3602 = add nsw i32 %3601, 3 + store volatile i32 %3602, i32* @P3_is_marked, align 4, !tbaa !5 + br label %3603 + +3603: ; preds = %3582, %3586, %3578, %3574, %3571, %3568 + %3604 = load volatile 
i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3605 = icmp sgt i32 %3604, 4 + br i1 %3605, label %3606, label %3639 + +3606: ; preds = %3603 + %3607 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3608 = icmp slt i32 %3607, 4 + br i1 %3608, label %3609, label %3639 + +3609: ; preds = %3606 + %3610 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3611 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3612 = icmp eq i64 %3610, %3611 + br i1 %3612, label %3613, label %3639 + +3613: ; preds = %3609 + %3614 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3615 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %3616 = icmp eq i64 %3614, %3615 + br i1 %3616, label %3617, label %3639 + +3617: ; preds = %3613 + %3618 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3619 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3620 = icmp sgt i64 %3619, %3618 + br i1 %3620, label %3621, label %3639 + +3621: ; preds = %3617 + %3622 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + store volatile i64 %3622, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3623 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3624 = add nsw i32 %3623, -4 + store volatile i32 %3624, i32* @P2_is_marked, align 4, !tbaa !5 + %3625 = add nsw i64 %3619, %3618 + %3626 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3627 = sext i32 %3626 to i64 + %3628 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3627 + store volatile i64 %3618, i64* %3628, align 8, !tbaa !9 + %3629 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3630 = add nsw i32 %3629, 1 + %3631 = sext i32 %3630 to i64 + %3632 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3631 + store volatile i64 %3619, i64* %3632, align 8, !tbaa !9 + %3633 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3634 = add nsw i32 %3633, 2 + %3635 = sext i32 %3634 to i64 + %3636 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3635 + store volatile i64 %3625, i64* %3636, align 8, !tbaa !9 + %3637 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3638 = add nsw i32 %3637, 3 + store volatile i32 %3638, i32* @P3_is_marked, align 4, !tbaa !5 + br label %3639 + +3639: ; preds = %3617, %3621, %3613, %3609, %3606, %3603 + %3640 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3641 = icmp sgt i32 %3640, 4 + br i1 %3641, label %3642, label %3675 + +3642: ; preds = %3639 + %3643 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3644 = icmp slt i32 %3643, 4 + br i1 %3644, label %3645, label %3675 + +3645: ; preds = %3642 + %3646 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3647 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3648 = 
icmp eq i64 %3646, %3647 + br i1 %3648, label %3649, label %3675 + +3649: ; preds = %3645 + %3650 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3651 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %3652 = icmp eq i64 %3650, %3651 + br i1 %3652, label %3653, label %3675 + +3653: ; preds = %3649 + %3654 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3655 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3656 = icmp sgt i64 %3655, %3654 + br i1 %3656, label %3657, label %3675 + +3657: ; preds = %3653 + %3658 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + store volatile i64 %3658, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3659 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3660 = add nsw i32 %3659, -4 + store volatile i32 %3660, i32* @P2_is_marked, align 4, !tbaa !5 + %3661 = add nsw i64 %3655, %3654 + %3662 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3663 = sext i32 %3662 to i64 + %3664 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3663 + store volatile i64 %3654, i64* %3664, align 8, !tbaa !9 + %3665 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3666 = add nsw i32 %3665, 1 + %3667 = sext i32 %3666 to i64 + %3668 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3667 + store volatile i64 %3655, i64* %3668, align 8, !tbaa !9 + %3669 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3670 = add nsw i32 %3669, 2 + %3671 = sext i32 %3670 to i64 + %3672 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3671 + store volatile i64 %3661, i64* %3672, align 8, !tbaa !9 + %3673 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3674 = add nsw i32 %3673, 3 + store volatile i32 %3674, i32* @P3_is_marked, align 4, !tbaa !5 + br label %3675 + +3675: ; preds = %3653, %3657, %3649, %3645, %3642, %3639 + %3676 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3677 = icmp sgt i32 %3676, 4 + br i1 %3677, label %3678, label %3711 + +3678: ; preds = %3675 + %3679 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3680 = icmp slt i32 %3679, 4 + br i1 %3680, label %3681, label %3711 + +3681: ; preds = %3678 + %3682 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3683 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %3684 = icmp eq i64 %3682, %3683 + br i1 %3684, label %3685, label %3711 + +3685: ; preds = %3681 + %3686 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3687 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3688 = icmp eq i64 %3686, %3687 + br i1 %3688, label %3689, label %3711 + +3689: ; preds = %3685 + %3690 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 
16, !tbaa !9 + %3691 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3692 = icmp sgt i64 %3691, %3690 + br i1 %3692, label %3693, label %3711 + +3693: ; preds = %3689 + %3694 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + store volatile i64 %3694, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3695 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3696 = add nsw i32 %3695, -4 + store volatile i32 %3696, i32* @P2_is_marked, align 4, !tbaa !5 + %3697 = add nsw i64 %3691, %3690 + %3698 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3699 = sext i32 %3698 to i64 + %3700 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3699 + store volatile i64 %3690, i64* %3700, align 8, !tbaa !9 + %3701 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3702 = add nsw i32 %3701, 1 + %3703 = sext i32 %3702 to i64 + %3704 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3703 + store volatile i64 %3691, i64* %3704, align 8, !tbaa !9 + %3705 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3706 = add nsw i32 %3705, 2 + %3707 = sext i32 %3706 to i64 + %3708 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3707 + store volatile i64 %3697, i64* %3708, align 8, !tbaa !9 + %3709 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3710 = add nsw i32 %3709, 3 + store volatile i32 %3710, i32* @P3_is_marked, align 4, !tbaa !5 + br label %3711 + +3711: ; preds = %3689, %3693, %3685, %3681, %3678, %3675 + %3712 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3713 = icmp sgt i32 %3712, 4 + br i1 %3713, label %3714, label %3747 + +3714: ; preds = %3711 + %3715 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3716 = icmp slt i32 %3715, 4 + br i1 %3716, label %3717, label %3747 + +3717: ; preds = %3714 + %3718 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3719 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %3720 = icmp eq i64 %3718, %3719 + br i1 %3720, label %3721, label %3747 + +3721: ; preds = %3717 + %3722 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3723 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %3724 = icmp eq i64 %3722, %3723 + br i1 %3724, label %3725, label %3747 + +3725: ; preds = %3721 + %3726 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3727 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3728 = icmp sgt i64 %3727, %3726 + br i1 %3728, label %3729, label %3747 + +3729: ; preds = %3725 + %3730 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + store volatile i64 %3730, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3731 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa 
!5 + %3732 = add nsw i32 %3731, -4 + store volatile i32 %3732, i32* @P2_is_marked, align 4, !tbaa !5 + %3733 = add nsw i64 %3727, %3726 + %3734 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3735 = sext i32 %3734 to i64 + %3736 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3735 + store volatile i64 %3726, i64* %3736, align 8, !tbaa !9 + %3737 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3738 = add nsw i32 %3737, 1 + %3739 = sext i32 %3738 to i64 + %3740 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3739 + store volatile i64 %3727, i64* %3740, align 8, !tbaa !9 + %3741 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3742 = add nsw i32 %3741, 2 + %3743 = sext i32 %3742 to i64 + %3744 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3743 + store volatile i64 %3733, i64* %3744, align 8, !tbaa !9 + %3745 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3746 = add nsw i32 %3745, 3 + store volatile i32 %3746, i32* @P3_is_marked, align 4, !tbaa !5 + br label %3747 + +3747: ; preds = %3725, %3729, %3721, %3717, %3714, %3711 + %3748 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3749 = icmp sgt i32 %3748, 4 + br i1 %3749, label %3750, label %3783 + +3750: ; preds = %3747 + %3751 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3752 = icmp slt i32 %3751, 4 + br i1 %3752, label %3753, label %3783 + +3753: ; preds = %3750 + %3754 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3755 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %3756 = icmp eq i64 %3754, %3755 + br i1 %3756, label %3757, label %3783 + +3757: ; preds = %3753 + %3758 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3759 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3760 = icmp eq i64 %3758, %3759 + br i1 %3760, label %3761, label %3783 + +3761: ; preds = %3757 + %3762 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3763 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3764 = icmp sgt i64 %3763, %3762 + br i1 %3764, label %3765, label %3783 + +3765: ; preds = %3761 + %3766 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + store volatile i64 %3766, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3767 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3768 = add nsw i32 %3767, -4 + store volatile i32 %3768, i32* @P2_is_marked, align 4, !tbaa !5 + %3769 = add nsw i64 %3763, %3762 + %3770 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3771 = sext i32 %3770 to i64 + %3772 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3771 + store volatile i64 %3762, i64* %3772, align 8, !tbaa !9 + %3773 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3774 = add nsw i32 %3773, 1 + %3775 = sext i32 %3774 to i64 + %3776 = getelementptr inbounds [6 x i64], [6 x i64]* 
@P3_marking_member_0, i64 0, i64 %3775 + store volatile i64 %3763, i64* %3776, align 8, !tbaa !9 + %3777 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3778 = add nsw i32 %3777, 2 + %3779 = sext i32 %3778 to i64 + %3780 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3779 + store volatile i64 %3769, i64* %3780, align 8, !tbaa !9 + %3781 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3782 = add nsw i32 %3781, 3 + store volatile i32 %3782, i32* @P3_is_marked, align 4, !tbaa !5 + br label %3783 + +3783: ; preds = %3761, %3765, %3757, %3753, %3750, %3747 + %3784 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3785 = icmp sgt i32 %3784, 4 + br i1 %3785, label %3786, label %3819 + +3786: ; preds = %3783 + %3787 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3788 = icmp slt i32 %3787, 4 + br i1 %3788, label %3789, label %3819 + +3789: ; preds = %3786 + %3790 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3791 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %3792 = icmp eq i64 %3790, %3791 + br i1 %3792, label %3793, label %3819 + +3793: ; preds = %3789 + %3794 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3795 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %3796 = icmp eq i64 %3794, %3795 + br i1 %3796, label %3797, label %3819 + +3797: ; preds = %3793 + %3798 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3799 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3800 = icmp sgt i64 %3799, %3798 + br i1 %3800, label %3801, label %3819 + +3801: ; preds = %3797 + %3802 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + store volatile i64 %3802, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3803 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3804 = add nsw i32 %3803, -4 + store volatile i32 %3804, i32* @P2_is_marked, align 4, !tbaa !5 + %3805 = add nsw i64 %3799, %3798 + %3806 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3807 = sext i32 %3806 to i64 + %3808 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3807 + store volatile i64 %3798, i64* %3808, align 8, !tbaa !9 + %3809 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3810 = add nsw i32 %3809, 1 + %3811 = sext i32 %3810 to i64 + %3812 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3811 + store volatile i64 %3799, i64* %3812, align 8, !tbaa !9 + %3813 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3814 = add nsw i32 %3813, 2 + %3815 = sext i32 %3814 to i64 + %3816 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3815 + store volatile i64 %3805, i64* %3816, align 8, !tbaa !9 + %3817 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3818 = add nsw i32 %3817, 3 + store volatile i32 %3818, i32* @P3_is_marked, align 4, !tbaa !5 + br label %3819 + +3819: ; preds = 
%3797, %3801, %3793, %3789, %3786, %3783 + %3820 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3821 = icmp sgt i32 %3820, 4 + br i1 %3821, label %3822, label %3855 + +3822: ; preds = %3819 + %3823 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3824 = icmp slt i32 %3823, 4 + br i1 %3824, label %3825, label %3855 + +3825: ; preds = %3822 + %3826 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3827 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3828 = icmp eq i64 %3826, %3827 + br i1 %3828, label %3829, label %3855 + +3829: ; preds = %3825 + %3830 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3831 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %3832 = icmp eq i64 %3830, %3831 + br i1 %3832, label %3833, label %3855 + +3833: ; preds = %3829 + %3834 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3835 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3836 = icmp sgt i64 %3835, %3834 + br i1 %3836, label %3837, label %3855 + +3837: ; preds = %3833 + %3838 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + store volatile i64 %3838, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3839 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3840 = add nsw i32 %3839, -4 + store volatile i32 %3840, i32* @P2_is_marked, align 4, !tbaa !5 + %3841 = add nsw i64 %3835, %3834 + %3842 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3843 = sext i32 %3842 to i64 + %3844 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3843 + store volatile i64 %3834, i64* %3844, align 8, !tbaa !9 + %3845 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3846 = add nsw i32 %3845, 1 + %3847 = sext i32 %3846 to i64 + %3848 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3847 + store volatile i64 %3835, i64* %3848, align 8, !tbaa !9 + %3849 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3850 = add nsw i32 %3849, 2 + %3851 = sext i32 %3850 to i64 + %3852 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3851 + store volatile i64 %3841, i64* %3852, align 8, !tbaa !9 + %3853 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3854 = add nsw i32 %3853, 3 + store volatile i32 %3854, i32* @P3_is_marked, align 4, !tbaa !5 + br label %3855 + +3855: ; preds = %3833, %3837, %3829, %3825, %3822, %3819 + %3856 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3857 = icmp sgt i32 %3856, 4 + br i1 %3857, label %3858, label %3891 + +3858: ; preds = %3855 + %3859 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3860 = icmp slt i32 %3859, 4 + br i1 %3860, label %3861, label %3891 + +3861: ; preds = %3858 + %3862 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3863 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* 
@P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3864 = icmp eq i64 %3862, %3863 + br i1 %3864, label %3865, label %3891 + +3865: ; preds = %3861 + %3866 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3867 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %3868 = icmp eq i64 %3866, %3867 + br i1 %3868, label %3869, label %3891 + +3869: ; preds = %3865 + %3870 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3871 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3872 = icmp sgt i64 %3871, %3870 + br i1 %3872, label %3873, label %3891 + +3873: ; preds = %3869 + %3874 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + store volatile i64 %3874, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3875 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3876 = add nsw i32 %3875, -4 + store volatile i32 %3876, i32* @P2_is_marked, align 4, !tbaa !5 + %3877 = add nsw i64 %3871, %3870 + %3878 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3879 = sext i32 %3878 to i64 + %3880 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3879 + store volatile i64 %3870, i64* %3880, align 8, !tbaa !9 + %3881 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3882 = add nsw i32 %3881, 1 + %3883 = sext i32 %3882 to i64 + %3884 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3883 + store volatile i64 %3871, i64* %3884, align 8, !tbaa !9 + %3885 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3886 = add nsw i32 %3885, 2 + %3887 = sext i32 %3886 to i64 + %3888 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3887 + store volatile i64 %3877, i64* %3888, align 8, !tbaa !9 + %3889 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3890 = add nsw i32 %3889, 3 + store volatile i32 %3890, i32* @P3_is_marked, align 4, !tbaa !5 + br label %3891 + +3891: ; preds = %3869, %3873, %3865, %3861, %3858, %3855 + %3892 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3893 = icmp sgt i32 %3892, 4 + br i1 %3893, label %3894, label %3927 + +3894: ; preds = %3891 + %3895 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3896 = icmp slt i32 %3895, 4 + br i1 %3896, label %3897, label %3927 + +3897: ; preds = %3894 + %3898 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3899 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %3900 = icmp eq i64 %3898, %3899 + br i1 %3900, label %3901, label %3927 + +3901: ; preds = %3897 + %3902 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3903 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3904 = icmp eq i64 %3902, %3903 + br i1 %3904, label %3905, label %3927 + +3905: ; preds = %3901 + %3906 = load volatile i64, i64* getelementptr inbounds 
([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3907 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3908 = icmp sgt i64 %3907, %3906 + br i1 %3908, label %3909, label %3927 + +3909: ; preds = %3905 + %3910 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + store volatile i64 %3910, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3911 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3912 = add nsw i32 %3911, -4 + store volatile i32 %3912, i32* @P2_is_marked, align 4, !tbaa !5 + %3913 = add nsw i64 %3907, %3906 + %3914 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3915 = sext i32 %3914 to i64 + %3916 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3915 + store volatile i64 %3906, i64* %3916, align 8, !tbaa !9 + %3917 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3918 = add nsw i32 %3917, 1 + %3919 = sext i32 %3918 to i64 + %3920 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3919 + store volatile i64 %3907, i64* %3920, align 8, !tbaa !9 + %3921 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3922 = add nsw i32 %3921, 2 + %3923 = sext i32 %3922 to i64 + %3924 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3923 + store volatile i64 %3913, i64* %3924, align 8, !tbaa !9 + %3925 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3926 = add nsw i32 %3925, 3 + store volatile i32 %3926, i32* @P3_is_marked, align 4, !tbaa !5 + br label %3927 + +3927: ; preds = %3905, %3909, %3901, %3897, %3894, %3891 + %3928 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3929 = icmp sgt i32 %3928, 4 + br i1 %3929, label %3930, label %3962 + +3930: ; preds = %3927 + %3931 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3932 = icmp slt i32 %3931, 4 + br i1 %3932, label %3933, label %3962 + +3933: ; preds = %3930 + %3934 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3935 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %3936 = icmp eq i64 %3934, %3935 + br i1 %3936, label %3937, label %3962 + +3937: ; preds = %3933 + %3938 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3939 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %3940 = icmp eq i64 %3938, %3939 + br i1 %3940, label %3941, label %3962 + +3941: ; preds = %3937 + %3942 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3943 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3944 = icmp sgt i64 %3943, %3942 + br i1 %3944, label %3945, label %3962 + +3945: ; preds = %3941 + %3946 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3947 = add nsw i32 %3946, -4 + store volatile i32 %3947, i32* @P2_is_marked, align 4, !tbaa !5 + %3948 = add nsw i64 %3943, %3942 + %3949 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + 
%3950 = sext i32 %3949 to i64 + %3951 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3950 + store volatile i64 %3942, i64* %3951, align 8, !tbaa !9 + %3952 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3953 = add nsw i32 %3952, 1 + %3954 = sext i32 %3953 to i64 + %3955 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3954 + store volatile i64 %3943, i64* %3955, align 8, !tbaa !9 + %3956 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3957 = add nsw i32 %3956, 2 + %3958 = sext i32 %3957 to i64 + %3959 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3958 + store volatile i64 %3948, i64* %3959, align 8, !tbaa !9 + %3960 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3961 = add nsw i32 %3960, 3 + store volatile i32 %3961, i32* @P3_is_marked, align 4, !tbaa !5 + br label %3962 + +3962: ; preds = %3941, %3945, %3937, %3933, %3930, %3927 + %3963 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3964 = icmp sgt i32 %3963, 4 + br i1 %3964, label %3965, label %3998 + +3965: ; preds = %3962 + %3966 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3967 = icmp slt i32 %3966, 4 + br i1 %3967, label %3968, label %3998 + +3968: ; preds = %3965 + %3969 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3970 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %3971 = icmp eq i64 %3969, %3970 + br i1 %3971, label %3972, label %3998 + +3972: ; preds = %3968 + %3973 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3974 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3975 = icmp eq i64 %3973, %3974 + br i1 %3975, label %3976, label %3998 + +3976: ; preds = %3972 + %3977 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %3978 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %3979 = icmp sgt i64 %3978, %3977 + br i1 %3979, label %3980, label %3998 + +3980: ; preds = %3976 + %3981 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + store volatile i64 %3981, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %3982 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %3983 = add nsw i32 %3982, -4 + store volatile i32 %3983, i32* @P2_is_marked, align 4, !tbaa !5 + %3984 = add nsw i64 %3978, %3977 + %3985 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3986 = sext i32 %3985 to i64 + %3987 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3986 + store volatile i64 %3977, i64* %3987, align 8, !tbaa !9 + %3988 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3989 = add nsw i32 %3988, 1 + %3990 = sext i32 %3989 to i64 + %3991 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3990 + store volatile i64 %3978, i64* %3991, align 8, !tbaa !9 + %3992 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3993 = add nsw i32 %3992, 2 + %3994 = sext i32 %3993 
to i64 + %3995 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %3994 + store volatile i64 %3984, i64* %3995, align 8, !tbaa !9 + %3996 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %3997 = add nsw i32 %3996, 3 + store volatile i32 %3997, i32* @P3_is_marked, align 4, !tbaa !5 + br label %3998 + +3998: ; preds = %3976, %3980, %3972, %3968, %3965, %3962 + %3999 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %4000 = icmp sgt i32 %3999, 4 + br i1 %4000, label %4001, label %4033 + +4001: ; preds = %3998 + %4002 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4003 = icmp slt i32 %4002, 4 + br i1 %4003, label %4004, label %4033 + +4004: ; preds = %4001 + %4005 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %4006 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %4007 = icmp eq i64 %4005, %4006 + br i1 %4007, label %4008, label %4033 + +4008: ; preds = %4004 + %4009 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %4010 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %4011 = icmp eq i64 %4009, %4010 + br i1 %4011, label %4012, label %4033 + +4012: ; preds = %4008 + %4013 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %4014 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %4015 = icmp sgt i64 %4014, %4013 + br i1 %4015, label %4016, label %4033 + +4016: ; preds = %4012 + %4017 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %4018 = add nsw i32 %4017, -4 + store volatile i32 %4018, i32* @P2_is_marked, align 4, !tbaa !5 + %4019 = add nsw i64 %4014, %4013 + %4020 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4021 = sext i32 %4020 to i64 + %4022 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4021 + store volatile i64 %4013, i64* %4022, align 8, !tbaa !9 + %4023 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4024 = add nsw i32 %4023, 1 + %4025 = sext i32 %4024 to i64 + %4026 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4025 + store volatile i64 %4014, i64* %4026, align 8, !tbaa !9 + %4027 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4028 = add nsw i32 %4027, 2 + %4029 = sext i32 %4028 to i64 + %4030 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4029 + store volatile i64 %4019, i64* %4030, align 8, !tbaa !9 + %4031 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4032 = add nsw i32 %4031, 3 + store volatile i32 %4032, i32* @P3_is_marked, align 4, !tbaa !5 + br label %4033 + +4033: ; preds = %4012, %4016, %4008, %4004, %4001, %3998 + %4034 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %4035 = icmp sgt i32 %4034, 4 + br i1 %4035, label %4036, label %4069 + +4036: ; preds = %4033 + %4037 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4038 = icmp slt i32 %4037, 4 + br i1 %4038, label %4039, label %4069 + +4039: ; preds = %4036 + %4040 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, 
!tbaa !9 + %4041 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %4042 = icmp eq i64 %4040, %4041 + br i1 %4042, label %4043, label %4069 + +4043: ; preds = %4039 + %4044 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %4045 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %4046 = icmp eq i64 %4044, %4045 + br i1 %4046, label %4047, label %4069 + +4047: ; preds = %4043 + %4048 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %4049 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %4050 = icmp sgt i64 %4049, %4048 + br i1 %4050, label %4051, label %4069 + +4051: ; preds = %4047 + %4052 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + store volatile i64 %4052, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %4053 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %4054 = add nsw i32 %4053, -4 + store volatile i32 %4054, i32* @P2_is_marked, align 4, !tbaa !5 + %4055 = add nsw i64 %4049, %4048 + %4056 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4057 = sext i32 %4056 to i64 + %4058 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4057 + store volatile i64 %4048, i64* %4058, align 8, !tbaa !9 + %4059 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4060 = add nsw i32 %4059, 1 + %4061 = sext i32 %4060 to i64 + %4062 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4061 + store volatile i64 %4049, i64* %4062, align 8, !tbaa !9 + %4063 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4064 = add nsw i32 %4063, 2 + %4065 = sext i32 %4064 to i64 + %4066 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4065 + store volatile i64 %4055, i64* %4066, align 8, !tbaa !9 + %4067 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4068 = add nsw i32 %4067, 3 + store volatile i32 %4068, i32* @P3_is_marked, align 4, !tbaa !5 + br label %4069 + +4069: ; preds = %4047, %4051, %4043, %4039, %4036, %4033 + %4070 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %4071 = icmp sgt i32 %4070, 4 + br i1 %4071, label %4072, label %4105 + +4072: ; preds = %4069 + %4073 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4074 = icmp slt i32 %4073, 4 + br i1 %4074, label %4075, label %4105 + +4075: ; preds = %4072 + %4076 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %4077 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %4078 = icmp eq i64 %4076, %4077 + br i1 %4078, label %4079, label %4105 + +4079: ; preds = %4075 + %4080 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %4081 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %4082 = icmp eq i64 %4080, %4081 + br i1 %4082, label %4083, label 
%4105 + +4083: ; preds = %4079 + %4084 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %4085 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %4086 = icmp sgt i64 %4085, %4084 + br i1 %4086, label %4087, label %4105 + +4087: ; preds = %4083 + %4088 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + store volatile i64 %4088, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %4089 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %4090 = add nsw i32 %4089, -4 + store volatile i32 %4090, i32* @P2_is_marked, align 4, !tbaa !5 + %4091 = add nsw i64 %4085, %4084 + %4092 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4093 = sext i32 %4092 to i64 + %4094 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4093 + store volatile i64 %4084, i64* %4094, align 8, !tbaa !9 + %4095 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4096 = add nsw i32 %4095, 1 + %4097 = sext i32 %4096 to i64 + %4098 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4097 + store volatile i64 %4085, i64* %4098, align 8, !tbaa !9 + %4099 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4100 = add nsw i32 %4099, 2 + %4101 = sext i32 %4100 to i64 + %4102 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4101 + store volatile i64 %4091, i64* %4102, align 8, !tbaa !9 + %4103 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4104 = add nsw i32 %4103, 3 + store volatile i32 %4104, i32* @P3_is_marked, align 4, !tbaa !5 + br label %4105 + +4105: ; preds = %4083, %4087, %4079, %4075, %4072, %4069 + %4106 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %4107 = icmp sgt i32 %4106, 4 + br i1 %4107, label %4108, label %4141 + +4108: ; preds = %4105 + %4109 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4110 = icmp slt i32 %4109, 4 + br i1 %4110, label %4111, label %4141 + +4111: ; preds = %4108 + %4112 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %4113 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %4114 = icmp eq i64 %4112, %4113 + br i1 %4114, label %4115, label %4141 + +4115: ; preds = %4111 + %4116 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %4117 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %4118 = icmp eq i64 %4116, %4117 + br i1 %4118, label %4119, label %4141 + +4119: ; preds = %4115 + %4120 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %4121 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %4122 = icmp sgt i64 %4121, %4120 + br i1 %4122, label %4123, label %4141 + +4123: ; preds = %4119 + %4124 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + store volatile i64 %4124, i64* 
getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %4125 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %4126 = add nsw i32 %4125, -4 + store volatile i32 %4126, i32* @P2_is_marked, align 4, !tbaa !5 + %4127 = add nsw i64 %4121, %4120 + %4128 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4129 = sext i32 %4128 to i64 + %4130 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4129 + store volatile i64 %4120, i64* %4130, align 8, !tbaa !9 + %4131 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4132 = add nsw i32 %4131, 1 + %4133 = sext i32 %4132 to i64 + %4134 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4133 + store volatile i64 %4121, i64* %4134, align 8, !tbaa !9 + %4135 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4136 = add nsw i32 %4135, 2 + %4137 = sext i32 %4136 to i64 + %4138 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4137 + store volatile i64 %4127, i64* %4138, align 8, !tbaa !9 + %4139 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4140 = add nsw i32 %4139, 3 + store volatile i32 %4140, i32* @P3_is_marked, align 4, !tbaa !5 + br label %4141 + +4141: ; preds = %4119, %4123, %4115, %4111, %4108, %4105 + %4142 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %4143 = icmp sgt i32 %4142, 4 + br i1 %4143, label %4144, label %4176 + +4144: ; preds = %4141 + %4145 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4146 = icmp slt i32 %4145, 4 + br i1 %4146, label %4147, label %4176 + +4147: ; preds = %4144 + %4148 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %4149 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %4150 = icmp eq i64 %4148, %4149 + br i1 %4150, label %4151, label %4176 + +4151: ; preds = %4147 + %4152 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %4153 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %4154 = icmp eq i64 %4152, %4153 + br i1 %4154, label %4155, label %4176 + +4155: ; preds = %4151 + %4156 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %4157 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %4158 = icmp sgt i64 %4157, %4156 + br i1 %4158, label %4159, label %4176 + +4159: ; preds = %4155 + %4160 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %4161 = add nsw i32 %4160, -4 + store volatile i32 %4161, i32* @P2_is_marked, align 4, !tbaa !5 + %4162 = add nsw i64 %4157, %4156 + %4163 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4164 = sext i32 %4163 to i64 + %4165 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4164 + store volatile i64 %4156, i64* %4165, align 8, !tbaa !9 + %4166 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4167 = add nsw i32 %4166, 1 + %4168 = sext i32 %4167 to i64 + %4169 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4168 + store volatile i64 %4157, i64* %4169, align 8, !tbaa !9 + %4170 = load 
volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4171 = add nsw i32 %4170, 2 + %4172 = sext i32 %4171 to i64 + %4173 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4172 + store volatile i64 %4162, i64* %4173, align 8, !tbaa !9 + %4174 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4175 = add nsw i32 %4174, 3 + store volatile i32 %4175, i32* @P3_is_marked, align 4, !tbaa !5 + br label %4176 + +4176: ; preds = %4155, %4159, %4151, %4147, %4144, %4141 + %4177 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %4178 = icmp sgt i32 %4177, 4 + br i1 %4178, label %4179, label %4212 + +4179: ; preds = %4176 + %4180 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4181 = icmp slt i32 %4180, 4 + br i1 %4181, label %4182, label %4212 + +4182: ; preds = %4179 + %4183 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %4184 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %4185 = icmp eq i64 %4183, %4184 + br i1 %4185, label %4186, label %4212 + +4186: ; preds = %4182 + %4187 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %4188 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %4189 = icmp eq i64 %4187, %4188 + br i1 %4189, label %4190, label %4212 + +4190: ; preds = %4186 + %4191 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %4192 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %4193 = icmp sgt i64 %4192, %4191 + br i1 %4193, label %4194, label %4212 + +4194: ; preds = %4190 + %4195 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + store volatile i64 %4195, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %4196 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %4197 = add nsw i32 %4196, -4 + store volatile i32 %4197, i32* @P2_is_marked, align 4, !tbaa !5 + %4198 = add nsw i64 %4192, %4191 + %4199 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4200 = sext i32 %4199 to i64 + %4201 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4200 + store volatile i64 %4191, i64* %4201, align 8, !tbaa !9 + %4202 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4203 = add nsw i32 %4202, 1 + %4204 = sext i32 %4203 to i64 + %4205 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4204 + store volatile i64 %4192, i64* %4205, align 8, !tbaa !9 + %4206 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4207 = add nsw i32 %4206, 2 + %4208 = sext i32 %4207 to i64 + %4209 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4208 + store volatile i64 %4198, i64* %4209, align 8, !tbaa !9 + %4210 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4211 = add nsw i32 %4210, 3 + store volatile i32 %4211, i32* @P3_is_marked, align 4, !tbaa !5 + br label %4212 + +4212: ; preds = %4190, %4194, %4186, %4182, %4179, %4176 + %4213 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %4214 = 
icmp sgt i32 %4213, 4 + br i1 %4214, label %4215, label %4247 + +4215: ; preds = %4212 + %4216 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4217 = icmp slt i32 %4216, 4 + br i1 %4217, label %4218, label %4247 + +4218: ; preds = %4215 + %4219 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %4220 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %4221 = icmp eq i64 %4219, %4220 + br i1 %4221, label %4222, label %4247 + +4222: ; preds = %4218 + %4223 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %4224 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %4225 = icmp eq i64 %4223, %4224 + br i1 %4225, label %4226, label %4247 + +4226: ; preds = %4222 + %4227 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %4228 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %4229 = icmp sgt i64 %4228, %4227 + br i1 %4229, label %4230, label %4247 + +4230: ; preds = %4226 + %4231 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %4232 = add nsw i32 %4231, -4 + store volatile i32 %4232, i32* @P2_is_marked, align 4, !tbaa !5 + %4233 = add nsw i64 %4228, %4227 + %4234 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4235 = sext i32 %4234 to i64 + %4236 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4235 + store volatile i64 %4227, i64* %4236, align 8, !tbaa !9 + %4237 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4238 = add nsw i32 %4237, 1 + %4239 = sext i32 %4238 to i64 + %4240 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4239 + store volatile i64 %4228, i64* %4240, align 8, !tbaa !9 + %4241 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4242 = add nsw i32 %4241, 2 + %4243 = sext i32 %4242 to i64 + %4244 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4243 + store volatile i64 %4233, i64* %4244, align 8, !tbaa !9 + %4245 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4246 = add nsw i32 %4245, 3 + store volatile i32 %4246, i32* @P3_is_marked, align 4, !tbaa !5 + br label %4247 + +4247: ; preds = %4226, %4230, %4222, %4218, %4215, %4212 + %4248 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %4249 = icmp sgt i32 %4248, 4 + br i1 %4249, label %4250, label %4283 + +4250: ; preds = %4247 + %4251 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4252 = icmp slt i32 %4251, 4 + br i1 %4252, label %4253, label %4283 + +4253: ; preds = %4250 + %4254 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %4255 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %4256 = icmp eq i64 %4254, %4255 + br i1 %4256, label %4257, label %4283 + +4257: ; preds = %4253 + %4258 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %4259 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* 
@P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %4260 = icmp eq i64 %4258, %4259 + br i1 %4260, label %4261, label %4283 + +4261: ; preds = %4257 + %4262 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %4263 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %4264 = icmp sgt i64 %4263, %4262 + br i1 %4264, label %4265, label %4283 + +4265: ; preds = %4261 + %4266 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + store volatile i64 %4266, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %4267 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %4268 = add nsw i32 %4267, -4 + store volatile i32 %4268, i32* @P2_is_marked, align 4, !tbaa !5 + %4269 = add nsw i64 %4263, %4262 + %4270 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4271 = sext i32 %4270 to i64 + %4272 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4271 + store volatile i64 %4262, i64* %4272, align 8, !tbaa !9 + %4273 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4274 = add nsw i32 %4273, 1 + %4275 = sext i32 %4274 to i64 + %4276 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4275 + store volatile i64 %4263, i64* %4276, align 8, !tbaa !9 + %4277 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4278 = add nsw i32 %4277, 2 + %4279 = sext i32 %4278 to i64 + %4280 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4279 + store volatile i64 %4269, i64* %4280, align 8, !tbaa !9 + %4281 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4282 = add nsw i32 %4281, 3 + store volatile i32 %4282, i32* @P3_is_marked, align 4, !tbaa !5 + br label %4283 + +4283: ; preds = %4261, %4265, %4257, %4253, %4250, %4247 + %4284 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %4285 = icmp sgt i32 %4284, 4 + br i1 %4285, label %4286, label %4319 + +4286: ; preds = %4283 + %4287 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4288 = icmp slt i32 %4287, 4 + br i1 %4288, label %4289, label %4319 + +4289: ; preds = %4286 + %4290 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %4291 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %4292 = icmp eq i64 %4290, %4291 + br i1 %4292, label %4293, label %4319 + +4293: ; preds = %4289 + %4294 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %4295 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %4296 = icmp eq i64 %4294, %4295 + br i1 %4296, label %4297, label %4319 + +4297: ; preds = %4293 + %4298 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %4299 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %4300 = icmp sgt i64 %4299, %4298 + br i1 %4300, label %4301, label %4319 + +4301: ; preds = %4297 + %4302 = load volatile i64, i64* getelementptr inbounds 
([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + store volatile i64 %4302, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %4303 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %4304 = add nsw i32 %4303, -4 + store volatile i32 %4304, i32* @P2_is_marked, align 4, !tbaa !5 + %4305 = add nsw i64 %4299, %4298 + %4306 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4307 = sext i32 %4306 to i64 + %4308 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4307 + store volatile i64 %4298, i64* %4308, align 8, !tbaa !9 + %4309 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4310 = add nsw i32 %4309, 1 + %4311 = sext i32 %4310 to i64 + %4312 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4311 + store volatile i64 %4299, i64* %4312, align 8, !tbaa !9 + %4313 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4314 = add nsw i32 %4313, 2 + %4315 = sext i32 %4314 to i64 + %4316 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4315 + store volatile i64 %4305, i64* %4316, align 8, !tbaa !9 + %4317 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4318 = add nsw i32 %4317, 3 + store volatile i32 %4318, i32* @P3_is_marked, align 4, !tbaa !5 + br label %4319 + +4319: ; preds = %4297, %4301, %4293, %4289, %4286, %4283 + %4320 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %4321 = icmp sgt i32 %4320, 4 + br i1 %4321, label %4322, label %4355 + +4322: ; preds = %4319 + %4323 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4324 = icmp slt i32 %4323, 4 + br i1 %4324, label %4325, label %4355 + +4325: ; preds = %4322 + %4326 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %4327 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %4328 = icmp eq i64 %4326, %4327 + br i1 %4328, label %4329, label %4355 + +4329: ; preds = %4325 + %4330 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %4331 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %4332 = icmp eq i64 %4330, %4331 + br i1 %4332, label %4333, label %4355 + +4333: ; preds = %4329 + %4334 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %4335 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %4336 = icmp sgt i64 %4335, %4334 + br i1 %4336, label %4337, label %4355 + +4337: ; preds = %4333 + %4338 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + store volatile i64 %4338, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %4339 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %4340 = add nsw i32 %4339, -4 + store volatile i32 %4340, i32* @P2_is_marked, align 4, !tbaa !5 + %4341 = add nsw i64 %4335, %4334 + %4342 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4343 = sext i32 %4342 to i64 + %4344 = getelementptr inbounds [6 x i64], [6 x i64]* 
@P3_marking_member_0, i64 0, i64 %4343 + store volatile i64 %4334, i64* %4344, align 8, !tbaa !9 + %4345 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4346 = add nsw i32 %4345, 1 + %4347 = sext i32 %4346 to i64 + %4348 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4347 + store volatile i64 %4335, i64* %4348, align 8, !tbaa !9 + %4349 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4350 = add nsw i32 %4349, 2 + %4351 = sext i32 %4350 to i64 + %4352 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4351 + store volatile i64 %4341, i64* %4352, align 8, !tbaa !9 + %4353 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4354 = add nsw i32 %4353, 3 + store volatile i32 %4354, i32* @P3_is_marked, align 4, !tbaa !5 + br label %4355 + +4355: ; preds = %4333, %4337, %4329, %4325, %4322, %4319 + %4356 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %4357 = icmp sgt i32 %4356, 4 + br i1 %4357, label %4358, label %4390 + +4358: ; preds = %4355 + %4359 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4360 = icmp slt i32 %4359, 4 + br i1 %4360, label %4361, label %4390 + +4361: ; preds = %4358 + %4362 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %4363 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %4364 = icmp eq i64 %4362, %4363 + br i1 %4364, label %4365, label %4390 + +4365: ; preds = %4361 + %4366 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %4367 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %4368 = icmp eq i64 %4366, %4367 + br i1 %4368, label %4369, label %4390 + +4369: ; preds = %4365 + %4370 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %4371 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %4372 = icmp sgt i64 %4371, %4370 + br i1 %4372, label %4373, label %4390 + +4373: ; preds = %4369 + %4374 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %4375 = add nsw i32 %4374, -4 + store volatile i32 %4375, i32* @P2_is_marked, align 4, !tbaa !5 + %4376 = add nsw i64 %4371, %4370 + %4377 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4378 = sext i32 %4377 to i64 + %4379 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4378 + store volatile i64 %4370, i64* %4379, align 8, !tbaa !9 + %4380 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4381 = add nsw i32 %4380, 1 + %4382 = sext i32 %4381 to i64 + %4383 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4382 + store volatile i64 %4371, i64* %4383, align 8, !tbaa !9 + %4384 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4385 = add nsw i32 %4384, 2 + %4386 = sext i32 %4385 to i64 + %4387 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4386 + store volatile i64 %4376, i64* %4387, align 8, !tbaa !9 + %4388 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4389 = add nsw i32 %4388, 3 + store volatile i32 %4389, i32* @P3_is_marked, align 4, !tbaa !5 + br label %4390 + +4390: ; 
preds = %4369, %4373, %4365, %4361, %4358, %4355 + %4391 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %4392 = icmp sgt i32 %4391, 4 + br i1 %4392, label %4393, label %4426 + +4393: ; preds = %4390 + %4394 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4395 = icmp slt i32 %4394, 4 + br i1 %4395, label %4396, label %4426 + +4396: ; preds = %4393 + %4397 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %4398 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %4399 = icmp eq i64 %4397, %4398 + br i1 %4399, label %4400, label %4426 + +4400: ; preds = %4396 + %4401 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %4402 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %4403 = icmp eq i64 %4401, %4402 + br i1 %4403, label %4404, label %4426 + +4404: ; preds = %4400 + %4405 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %4406 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %4407 = icmp sgt i64 %4406, %4405 + br i1 %4407, label %4408, label %4426 + +4408: ; preds = %4404 + %4409 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + store volatile i64 %4409, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 0), align 16, !tbaa !9 + %4410 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %4411 = add nsw i32 %4410, -4 + store volatile i32 %4411, i32* @P2_is_marked, align 4, !tbaa !5 + %4412 = add nsw i64 %4406, %4405 + %4413 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4414 = sext i32 %4413 to i64 + %4415 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4414 + store volatile i64 %4405, i64* %4415, align 8, !tbaa !9 + %4416 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4417 = add nsw i32 %4416, 1 + %4418 = sext i32 %4417 to i64 + %4419 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4418 + store volatile i64 %4406, i64* %4419, align 8, !tbaa !9 + %4420 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4421 = add nsw i32 %4420, 2 + %4422 = sext i32 %4421 to i64 + %4423 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4422 + store volatile i64 %4412, i64* %4423, align 8, !tbaa !9 + %4424 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4425 = add nsw i32 %4424, 3 + store volatile i32 %4425, i32* @P3_is_marked, align 4, !tbaa !5 + br label %4426 + +4426: ; preds = %4404, %4408, %4400, %4396, %4393, %4390 + %4427 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %4428 = icmp sgt i32 %4427, 4 + br i1 %4428, label %4429, label %4461 + +4429: ; preds = %4426 + %4430 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4431 = icmp slt i32 %4430, 4 + br i1 %4431, label %4432, label %4461 + +4432: ; preds = %4429 + %4433 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %4434 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x 
i64]* @P2_marking_member_0, i64 0, i64 2), align 16, !tbaa !9 + %4435 = icmp eq i64 %4433, %4434 + br i1 %4435, label %4436, label %4461 + +4436: ; preds = %4432 + %4437 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %4438 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 1), align 8, !tbaa !9 + %4439 = icmp eq i64 %4437, %4438 + br i1 %4439, label %4440, label %4461 + +4440: ; preds = %4436 + %4441 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 4), align 16, !tbaa !9 + %4442 = load volatile i64, i64* getelementptr inbounds ([5 x i64], [5 x i64]* @P2_marking_member_0, i64 0, i64 3), align 8, !tbaa !9 + %4443 = icmp sgt i64 %4442, %4441 + br i1 %4443, label %4444, label %4461 + +4444: ; preds = %4440 + %4445 = load volatile i32, i32* @P2_is_marked, align 4, !tbaa !5 + %4446 = add nsw i32 %4445, -4 + store volatile i32 %4446, i32* @P2_is_marked, align 4, !tbaa !5 + %4447 = add nsw i64 %4442, %4441 + %4448 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4449 = sext i32 %4448 to i64 + %4450 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4449 + store volatile i64 %4441, i64* %4450, align 8, !tbaa !9 + %4451 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4452 = add nsw i32 %4451, 1 + %4453 = sext i32 %4452 to i64 + %4454 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4453 + store volatile i64 %4442, i64* %4454, align 8, !tbaa !9 + %4455 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4456 = add nsw i32 %4455, 2 + %4457 = sext i32 %4456 to i64 + %4458 = getelementptr inbounds [6 x i64], [6 x i64]* @P3_marking_member_0, i64 0, i64 %4457 + store volatile i64 %4447, i64* %4458, align 8, !tbaa !9 + %4459 = load volatile i32, i32* @P3_is_marked, align 4, !tbaa !5 + %4460 = add nsw i32 %4459, 3 + store volatile i32 %4460, i32* @P3_is_marked, align 4, !tbaa !5 + br label %4461 + +4461: ; preds = %4440, %4444, %4436, %4432, %4429, %4426 + %4462 = icmp ugt i32 %2, 1 + br i1 %4462, label %1, label %4463, !llvm.loop !11 + +4463: ; preds = %4461 + ret i32 77 +} + +attributes #0 = { nofree norecurse nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = !{!6, !6, i64 0} +!6 = !{!"int", !7, i64 0} +!7 = !{!"omnipotent char", !8, i64 0} +!8 = !{!"Simple C/C++ TBAA"} +!9 = !{!10, !10, i64 0} +!10 = !{!"long", !7, i64 0} +!11 = distinct !{!11, !12, !13} +!12 = !{!"llvm.loop.mustprogress"} +!13 = !{!"llvm.loop.unroll.disable"} diff --git a/test/prime.ll b/test/prime.ll new file mode 100644 index 0000000..0feaa24 --- /dev/null +++ b/test/prime.ll @@ -0,0 +1,122 @@ +; ModuleID = 'prime.c' +source_filename = "prime.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone sspstrong uwtable willreturn +define dso_local zeroext i8 @divides(i32 %0, 
i32 %1) local_unnamed_addr #0 { + %3 = urem i32 %1, %0 + %4 = icmp eq i32 %3, 0 + %5 = zext i1 %4 to i8 + ret i8 %5 +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone sspstrong uwtable willreturn +define dso_local zeroext i8 @even(i32 %0) local_unnamed_addr #0 { + %2 = trunc i32 %0 to i8 + %3 = and i8 %2, 1 + %4 = xor i8 %3, 1 + ret i8 %4 +} + +; Function Attrs: nofree norecurse nosync nounwind readnone sspstrong uwtable +define dso_local zeroext i8 @prime(i32 %0) local_unnamed_addr #1 { + %2 = and i32 %0, 1 + %3 = icmp eq i32 %2, 0 + br i1 %3, label %6, label %4 + +4: ; preds = %1 + %5 = icmp ult i32 %0, 9 + br i1 %5, label %16, label %11 + +6: ; preds = %1 + %7 = icmp eq i32 %0, 2 + br label %18 + +8: ; preds = %11 + %9 = mul i32 %15, %15 + %10 = icmp ugt i32 %9, %0 + br i1 %10, label %16, label %11, !llvm.loop !5 + +11: ; preds = %4, %8 + %12 = phi i32 [ %15, %8 ], [ 3, %4 ] + %13 = urem i32 %0, %12 + %14 = icmp eq i32 %13, 0 + %15 = add i32 %12, 2 + br i1 %14, label %18, label %8 + +16: ; preds = %8, %4 + %17 = icmp ugt i32 %0, 1 + br label %18 + +18: ; preds = %11, %16, %6 + %19 = phi i1 [ %7, %6 ], [ %17, %16 ], [ false, %11 ] + %20 = zext i1 %19 to i8 + ret i8 %20 +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind sspstrong uwtable willreturn +define dso_local void @swap(i32* nocapture %0, i32* nocapture %1) local_unnamed_addr #2 { + %3 = load i32, i32* %0, align 4, !tbaa !8 + %4 = load i32, i32* %1, align 4, !tbaa !8 + store i32 %4, i32* %0, align 4, !tbaa !8 + store i32 %3, i32* %1, align 4, !tbaa !8 + ret void +} + +; Function Attrs: nofree nosync nounwind readnone sspstrong uwtable +define dso_local i32 @main() local_unnamed_addr #3 { + br label %1 + +1: ; preds = %1, %0 + %2 = phi i32 [ 3, %0 ], [ %5, %1 ] + %3 = urem i32 513239, %2 + %4 = icmp eq i32 %3, 0 + %5 = add nuw nsw i32 %2, 2 + %6 = mul i32 %5, %5 + %7 = icmp ugt i32 %6, 513239 + %8 = select i1 %4, i1 true, i1 %7 + br i1 %8, label %9, label %1, !llvm.loop !5 + +9: ; preds = %1 + br i1 %4, label %20, label %10 + +10: ; preds = %9, %10 + %11 = phi i32 [ %14, %10 ], [ 3, %9 ] + %12 = urem i32 21649, %11 + %13 = icmp eq i32 %12, 0 + %14 = add nuw nsw i32 %11, 2 + %15 = mul i32 %14, %14 + %16 = icmp ugt i32 %15, 21649 + %17 = select i1 %13, i1 true, i1 %16 + br i1 %17, label %18, label %10, !llvm.loop !5 + +18: ; preds = %10 + %19 = zext i1 %13 to i32 + br label %20 + +20: ; preds = %18, %9 + %21 = phi i32 [ 1, %9 ], [ %19, %18 ] + ret i32 %21 +} + +attributes #0 = { mustprogress nofree norecurse nosync nounwind readnone sspstrong uwtable willreturn "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #1 = { nofree norecurse nosync nounwind readnone sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #2 = { mustprogress nofree norecurse nosync nounwind sspstrong uwtable willreturn "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #3 = { nofree nosync nounwind readnone sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" 
"no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = distinct !{!5, !6, !7} +!6 = !{!"llvm.loop.mustprogress"} +!7 = !{!"llvm.loop.unroll.disable"} +!8 = !{!9, !9, i64 0} +!9 = !{!"int", !10, i64 0} +!10 = !{!"omnipotent char", !11, i64 0} +!11 = !{!"Simple C/C++ TBAA"} diff --git a/test/qsort-exam.ll b/test/qsort-exam.ll new file mode 100644 index 0000000..5b3a00c --- /dev/null +++ b/test/qsort-exam.ll @@ -0,0 +1,232 @@ +; ModuleID = 'qsort-exam.c' +source_filename = "qsort-exam.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +@arr = dso_local local_unnamed_addr global [20 x float] [float 5.000000e+00, float 4.000000e+00, float 0x40249999A0000000, float 0x3FF19999A0000000, float 0x4016CCCCC0000000, float 1.000000e+02, float 2.310000e+02, float 1.110000e+02, float 4.950000e+01, float 9.900000e+01, float 1.000000e+01, float 1.500000e+02, float 0x406BC70A40000000, float 1.010000e+02, float 7.700000e+01, float 4.400000e+01, float 3.500000e+01, float 0x40348A3D80000000, float 0x4058FF5C20000000, float 0x40563851E0000000], align 16 +@istack = dso_local local_unnamed_addr global [100 x i32] zeroinitializer, align 16 + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local void @sort(i64 %0) local_unnamed_addr #0 { + br label %2 + +2: ; preds = %50, %1 + %3 = phi i64 [ %0, %1 ], [ %51, %50 ] + %4 = phi i64 [ 1, %1 ], [ %52, %50 ] + %5 = phi i32 [ 0, %1 ], [ %53, %50 ] + %6 = getelementptr inbounds [20 x float], [20 x float]* @arr, i64 0, i64 %3 + br label %7 + +7: ; preds = %2, %111 + %8 = phi i64 [ %84, %111 ], [ %4, %2 ] + %9 = phi i32 [ %98, %111 ], [ %5, %2 ] + %10 = sub i64 %3, %8 + %11 = icmp ult i64 %10, 7 + br i1 %11, label %12, label %54 + +12: ; preds = %7 + %13 = add i64 %8, 1 + %14 = icmp ugt i64 %13, %3 + br i1 %14, label %37, label %15 + +15: ; preds = %12, %31 + %16 = phi i64 [ %35, %31 ], [ %13, %12 ] + %17 = phi i64 [ %16, %31 ], [ %8, %12 ] + %18 = getelementptr inbounds [20 x float], [20 x float]* @arr, i64 0, i64 %16 + %19 = load float, float* %18, align 4, !tbaa !5 + %20 = icmp ult i64 %17, %8 + br i1 %20, label %31, label %21 + +21: ; preds = %15, %26 + %22 = phi i64 [ %29, %26 ], [ %17, %15 ] + %23 = getelementptr inbounds [20 x float], [20 x float]* @arr, i64 0, i64 %22 + %24 = load float, float* %23, align 4, !tbaa !5 + %25 = fcmp ugt float %24, %19 + br i1 %25, label %26, label %31 + +26: ; preds = %21 + %27 = add i64 %22, 1 + %28 = getelementptr inbounds [20 x float], [20 x float]* @arr, i64 0, i64 %27 + store float %24, float* %28, align 4, !tbaa !5 + %29 = add i64 %22, -1 + %30 = icmp ult i64 %29, %8 + br i1 %30, label %31, label %21, !llvm.loop !9 + +31: ; preds = %26, %21, %15 + %32 = phi i64 [ %17, %15 ], [ %29, %26 ], [ %22, %21 ] + %33 = add i64 %32, 1 + %34 = getelementptr inbounds [20 x float], [20 x float]* @arr, i64 0, i64 %33 + store float %19, float* %34, align 4, !tbaa !5 + %35 = add i64 %16, 1 + %36 = icmp ugt i64 %35, %3 + br i1 %36, label %37, label %15, !llvm.loop !12 + +37: ; preds = %31, %12 + %38 = icmp eq i32 %9, 0 + br i1 %38, label %120, label %39 + +39: ; preds = %37 + %40 = add 
nsw i32 %9, -1 + %41 = sext i32 %9 to i64 + %42 = getelementptr inbounds [100 x i32], [100 x i32]* @istack, i64 0, i64 %41 + %43 = load i32, i32* %42, align 4, !tbaa !13 + %44 = sext i32 %43 to i64 + %45 = add nsw i32 %9, -2 + %46 = sext i32 %40 to i64 + %47 = getelementptr inbounds [100 x i32], [100 x i32]* @istack, i64 0, i64 %46 + %48 = load i32, i32* %47, align 4, !tbaa !13 + %49 = sext i32 %48 to i64 + br label %50 + +50: ; preds = %39, %102 + %51 = phi i64 [ %110, %102 ], [ %44, %39 ] + %52 = phi i64 [ %8, %102 ], [ %49, %39 ] + %53 = phi i32 [ %98, %102 ], [ %45, %39 ] + br label %2, !llvm.loop !15 + +54: ; preds = %7 + %55 = add i64 %8, %3 + %56 = lshr i64 %55, 1 + %57 = getelementptr inbounds [20 x float], [20 x float]* @arr, i64 0, i64 %56 + %58 = load float, float* %57, align 4, !tbaa !5 + %59 = add i64 %8, 1 + %60 = getelementptr inbounds [20 x float], [20 x float]* @arr, i64 0, i64 %59 + %61 = load float, float* %60, align 4, !tbaa !5 + store float %61, float* %57, align 4, !tbaa !5 + store float %58, float* %60, align 4, !tbaa !5 + %62 = getelementptr inbounds [20 x float], [20 x float]* @arr, i64 0, i64 %8 + %63 = load float, float* %62, align 4, !tbaa !5 + %64 = load float, float* %6, align 4, !tbaa !5 + %65 = fcmp ogt float %63, %64 + br i1 %65, label %66, label %67 + +66: ; preds = %54 + store float %64, float* %62, align 4, !tbaa !5 + store float %63, float* %6, align 4, !tbaa !5 + br label %67 + +67: ; preds = %66, %54 + %68 = load float, float* %60, align 4, !tbaa !5 + %69 = load float, float* %6, align 4, !tbaa !5 + %70 = fcmp ogt float %68, %69 + br i1 %70, label %71, label %72 + +71: ; preds = %67 + store float %69, float* %60, align 4, !tbaa !5 + store float %68, float* %6, align 4, !tbaa !5 + br label %72 + +72: ; preds = %71, %67 + %73 = load float, float* %62, align 4, !tbaa !5 + %74 = load float, float* %60, align 4, !tbaa !5 + %75 = fcmp ogt float %73, %74 + br i1 %75, label %76, label %77 + +76: ; preds = %72 + store float %74, float* %62, align 4, !tbaa !5 + store float %73, float* %60, align 4, !tbaa !5 + br label %77 + +77: ; preds = %76, %72 + %78 = load float, float* %60, align 4, !tbaa !5 + br label %79 + +79: ; preds = %96, %77 + %80 = phi i64 [ %3, %77 ], [ %90, %96 ] + %81 = phi i64 [ %59, %77 ], [ %84, %96 ] + br label %82 + +82: ; preds = %82, %79 + %83 = phi i64 [ %81, %79 ], [ %84, %82 ] + %84 = add i64 %83, 1 + %85 = getelementptr inbounds [20 x float], [20 x float]* @arr, i64 0, i64 %84 + %86 = load float, float* %85, align 4, !tbaa !5 + %87 = fcmp olt float %86, %78 + br i1 %87, label %82, label %88, !llvm.loop !16 + +88: ; preds = %82, %88 + %89 = phi i64 [ %90, %88 ], [ %80, %82 ] + %90 = add i64 %89, -1 + %91 = getelementptr inbounds [20 x float], [20 x float]* @arr, i64 0, i64 %90 + %92 = load float, float* %91, align 4, !tbaa !5 + %93 = fcmp ogt float %92, %78 + br i1 %93, label %88, label %94, !llvm.loop !17 + +94: ; preds = %88 + %95 = icmp ult i64 %90, %84 + br i1 %95, label %97, label %96 + +96: ; preds = %94 + store float %92, float* %85, align 4, !tbaa !5 + store float %86, float* %91, align 4, !tbaa !5 + br label %79, !llvm.loop !18 + +97: ; preds = %94 + store float %92, float* %60, align 4, !tbaa !5 + store float %78, float* %91, align 4, !tbaa !5 + %98 = add nsw i32 %9, 2 + %99 = sub i64 %3, %83 + %100 = sub i64 %90, %8 + %101 = icmp ult i64 %99, %100 + br i1 %101, label %111, label %102 + +102: ; preds = %97 + %103 = trunc i64 %3 to i32 + %104 = sext i32 %98 to i64 + %105 = getelementptr inbounds [100 x i32], [100 x i32]* 
@istack, i64 0, i64 %104 + store i32 %103, i32* %105, align 4, !tbaa !13 + %106 = trunc i64 %84 to i32 + %107 = add nsw i32 %9, 1 + %108 = sext i32 %107 to i64 + %109 = getelementptr inbounds [100 x i32], [100 x i32]* @istack, i64 0, i64 %108 + store i32 %106, i32* %109, align 4, !tbaa !13 + %110 = add i64 %89, -2 + br label %50 + +111: ; preds = %97 + %112 = trunc i64 %89 to i32 + %113 = add i32 %112, -2 + %114 = sext i32 %98 to i64 + %115 = getelementptr inbounds [100 x i32], [100 x i32]* @istack, i64 0, i64 %114 + store i32 %113, i32* %115, align 4, !tbaa !13 + %116 = trunc i64 %8 to i32 + %117 = add nsw i32 %9, 1 + %118 = sext i32 %117 to i64 + %119 = getelementptr inbounds [100 x i32], [100 x i32]* @istack, i64 0, i64 %118 + store i32 %116, i32* %119, align 4, !tbaa !13 + br label %7, !llvm.loop !15 + +120: ; preds = %37 + ret void +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local i32 @main() local_unnamed_addr #0 { + call void @sort(i64 20) + ret i32 0 +} + +attributes #0 = { nofree norecurse nosync nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = !{!6, !6, i64 0} +!6 = !{!"float", !7, i64 0} +!7 = !{!"omnipotent char", !8, i64 0} +!8 = !{!"Simple C/C++ TBAA"} +!9 = distinct !{!9, !10, !11} +!10 = !{!"llvm.loop.mustprogress"} +!11 = !{!"llvm.loop.unroll.disable"} +!12 = distinct !{!12, !10, !11} +!13 = !{!14, !14, i64 0} +!14 = !{!"int", !7, i64 0} +!15 = distinct !{!15, !11} +!16 = distinct !{!16, !10, !11} +!17 = distinct !{!17, !10, !11} +!18 = distinct !{!18, !11} diff --git a/test/qurt.ll b/test/qurt.ll new file mode 100644 index 0000000..e9bfacc --- /dev/null +++ b/test/qurt.ll @@ -0,0 +1,207 @@ +; ModuleID = 'qurt.c' +source_filename = "qurt.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +@a = dso_local local_unnamed_addr global [3 x double] zeroinitializer, align 16 +@flag = dso_local local_unnamed_addr global i32 0, align 4 +@x1 = dso_local local_unnamed_addr global [2 x double] zeroinitializer, align 16 +@x2 = dso_local local_unnamed_addr global [2 x double] zeroinitializer, align 16 + +; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone sspstrong uwtable willreturn +define dso_local double @qurt_fabs(double %0) local_unnamed_addr #0 { + %2 = fcmp ult double %0, 0.000000e+00 + %3 = fneg double %0 + %4 = select i1 %2, double %3, double %0 + ret double %4 +} + +; Function Attrs: nofree norecurse nosync nounwind readnone sspstrong uwtable +define dso_local double @qurt_sqrt(double %0) local_unnamed_addr #1 { + %2 = fcmp oeq double %0, 0.000000e+00 + br i1 %2, label %28, label %3 + +3: ; preds = %1 + %4 = fdiv double %0, 1.000000e+01 + br label %5 + +5: ; preds = %3, %23 + %6 = phi i32 [ 0, %3 ], [ %25, %23 ] + %7 = phi i32 [ 1, %3 ], [ %26, %23 ] + %8 = phi double [ %4, %3 ], [ %24, %23 ] + %9 = icmp eq i32 %6, 0 + br i1 %9, label %10, label %23 + +10: ; preds = %5 + %11 = fmul double %8, %8 + %12 = fsub double %0, %11 + %13 = fmul double %8, 2.000000e+00 + %14 = fdiv double %12, %13 + %15 = fadd 
double %8, %14 + %16 = fmul double %15, %15 + %17 = fsub double %0, %16 + %18 = fcmp ult double %17, 0.000000e+00 + %19 = fneg double %17 + %20 = select i1 %18, double %19, double %17 + %21 = fcmp ugt double %20, 1.000000e-05 + br i1 %21, label %23, label %22 + +22: ; preds = %10 + br label %23 + +23: ; preds = %22, %10, %5 + %24 = phi double [ %8, %5 ], [ %15, %22 ], [ %15, %10 ] + %25 = phi i32 [ 1, %5 ], [ 1, %22 ], [ 0, %10 ] + %26 = add nuw nsw i32 %7, 1 + %27 = icmp eq i32 %26, 20 + br i1 %27, label %28, label %5, !llvm.loop !5 + +28: ; preds = %23, %1 + %29 = phi double [ 0.000000e+00, %1 ], [ %24, %23 ] + ret double %29 +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local i32 @main() local_unnamed_addr #2 { + store double 1.000000e+00, double* getelementptr inbounds ([3 x double], [3 x double]* @a, i64 0, i64 0), align 16, !tbaa !8 + store double -3.000000e+00, double* getelementptr inbounds ([3 x double], [3 x double]* @a, i64 0, i64 1), align 8, !tbaa !8 + store double 2.000000e+00, double* getelementptr inbounds ([3 x double], [3 x double]* @a, i64 0, i64 2), align 16, !tbaa !8 + %1 = call i32 @qurt() + store double 1.000000e+00, double* getelementptr inbounds ([3 x double], [3 x double]* @a, i64 0, i64 0), align 16, !tbaa !8 + store double -2.000000e+00, double* getelementptr inbounds ([3 x double], [3 x double]* @a, i64 0, i64 1), align 8, !tbaa !8 + store double 1.000000e+00, double* getelementptr inbounds ([3 x double], [3 x double]* @a, i64 0, i64 2), align 16, !tbaa !8 + %2 = call i32 @qurt() + store double 1.000000e+00, double* getelementptr inbounds ([3 x double], [3 x double]* @a, i64 0, i64 0), align 16, !tbaa !8 + store double -4.000000e+00, double* getelementptr inbounds ([3 x double], [3 x double]* @a, i64 0, i64 1), align 8, !tbaa !8 + store double 8.000000e+00, double* getelementptr inbounds ([3 x double], [3 x double]* @a, i64 0, i64 2), align 16, !tbaa !8 + %3 = call i32 @qurt() + ret i32 0 +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local i32 @qurt() local_unnamed_addr #2 { + %1 = load double, double* getelementptr inbounds ([3 x double], [3 x double]* @a, i64 0, i64 0), align 16, !tbaa !8 + %2 = fcmp oeq double %1, 0.000000e+00 + br i1 %2, label %61, label %3 + +3: ; preds = %0 + %4 = load double, double* getelementptr inbounds ([3 x double], [3 x double]* @a, i64 0, i64 1), align 8, !tbaa !8 + %5 = fmul double %4, %4 + %6 = fmul double %1, 4.000000e+00 + %7 = load double, double* getelementptr inbounds ([3 x double], [3 x double]* @a, i64 0, i64 2), align 16, !tbaa !8 + %8 = fmul double %6, %7 + %9 = fsub double %5, %8 + %10 = fmul double %1, 2.000000e+00 + %11 = fcmp ult double %9, 0.000000e+00 + %12 = fneg double %9 + %13 = select i1 %11, double %12, double %9 + %14 = fcmp oeq double %13, 0.000000e+00 + br i1 %14, label %40, label %15 + +15: ; preds = %3 + %16 = fdiv double %13, 1.000000e+01 + br label %17 + +17: ; preds = %35, %15 + %18 = phi i32 [ 0, %15 ], [ %37, %35 ] + %19 = phi i32 [ 1, %15 ], [ %38, %35 ] + %20 = phi double [ %16, %15 ], [ %36, %35 ] + %21 = icmp eq i32 %18, 0 + br i1 %21, label %22, label %35 + +22: ; preds = %17 + %23 = fmul double %20, %20 + %24 = fsub double %13, %23 + %25 = fmul double %20, 2.000000e+00 + %26 = fdiv double %24, %25 + %27 = fadd double %20, %26 + %28 = fmul double %27, %27 + %29 = fsub double %13, %28 + %30 = fcmp ult double %29, 0.000000e+00 + %31 = fneg double %29 + %32 = select i1 %30, double %31, double %29 + %33 = fcmp ugt 
double %32, 1.000000e-05 + br i1 %33, label %35, label %34 + +34: ; preds = %22 + br label %35 + +35: ; preds = %34, %22, %17 + %36 = phi double [ %20, %17 ], [ %27, %34 ], [ %27, %22 ] + %37 = phi i32 [ 1, %17 ], [ 1, %34 ], [ 0, %22 ] + %38 = add nuw nsw i32 %19, 1 + %39 = icmp eq i32 %38, 20 + br i1 %39, label %40, label %17, !llvm.loop !5 + +40: ; preds = %35, %3 + %41 = phi double [ 0.000000e+00, %3 ], [ %36, %35 ] + %42 = fcmp ogt double %9, 0.000000e+00 + br i1 %42, label %43, label %49 + +43: ; preds = %40 + store i32 1, i32* @flag, align 4, !tbaa !12 + %44 = fsub double %41, %4 + %45 = fdiv double %44, %10 + store double %45, double* getelementptr inbounds ([2 x double], [2 x double]* @x1, i64 0, i64 0), align 16, !tbaa !8 + store double 0.000000e+00, double* getelementptr inbounds ([2 x double], [2 x double]* @x1, i64 0, i64 1), align 8, !tbaa !8 + %46 = fneg double %4 + %47 = fsub double %46, %41 + %48 = fdiv double %47, %10 + store double %48, double* getelementptr inbounds ([2 x double], [2 x double]* @x2, i64 0, i64 0), align 16, !tbaa !8 + br label %59 + +49: ; preds = %40 + %50 = fcmp oeq double %9, 0.000000e+00 + br i1 %50, label %51, label %54 + +51: ; preds = %49 + store i32 0, i32* @flag, align 4, !tbaa !12 + %52 = fneg double %4 + %53 = fdiv double %52, %10 + store double %53, double* getelementptr inbounds ([2 x double], [2 x double]* @x1, i64 0, i64 0), align 16, !tbaa !8 + store double 0.000000e+00, double* getelementptr inbounds ([2 x double], [2 x double]* @x1, i64 0, i64 1), align 8, !tbaa !8 + store double %53, double* getelementptr inbounds ([2 x double], [2 x double]* @x2, i64 0, i64 0), align 16, !tbaa !8 + br label %59 + +54: ; preds = %49 + store i32 -1, i32* @flag, align 4, !tbaa !12 + %55 = fdiv double %41, %10 + %56 = fneg double %4 + %57 = fdiv double %56, %10 + store double %57, double* getelementptr inbounds ([2 x double], [2 x double]* @x1, i64 0, i64 0), align 16, !tbaa !8 + store double %55, double* getelementptr inbounds ([2 x double], [2 x double]* @x1, i64 0, i64 1), align 8, !tbaa !8 + store double %57, double* getelementptr inbounds ([2 x double], [2 x double]* @x2, i64 0, i64 0), align 16, !tbaa !8 + %58 = fneg double %55 + br label %59 + +59: ; preds = %43, %51, %54 + %60 = phi double [ %58, %54 ], [ 0.000000e+00, %51 ], [ 0.000000e+00, %43 ] + store double %60, double* getelementptr inbounds ([2 x double], [2 x double]* @x2, i64 0, i64 1), align 8, !tbaa !8 + br label %61 + +61: ; preds = %59, %0 + %62 = phi i32 [ 999, %0 ], [ 0, %59 ] + ret i32 %62 +} + +attributes #0 = { mustprogress nofree norecurse nosync nounwind readnone sspstrong uwtable willreturn "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #1 = { nofree norecurse nosync nounwind readnone sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #2 = { nofree norecurse nosync nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC 
Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = distinct !{!5, !6, !7} +!6 = !{!"llvm.loop.mustprogress"} +!7 = !{!"llvm.loop.unroll.disable"} +!8 = !{!9, !9, i64 0} +!9 = !{!"double", !10, i64 0} +!10 = !{!"omnipotent char", !11, i64 0} +!11 = !{!"Simple C/C++ TBAA"} +!12 = !{!13, !13, i64 0} +!13 = !{!"int", !10, i64 0} diff --git a/test/recursion.ll b/test/recursion.ll new file mode 100644 index 0000000..d3c4d36 --- /dev/null +++ b/test/recursion.ll @@ -0,0 +1,82 @@ +; ModuleID = 'recursion.c' +source_filename = "recursion.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +@In = external global i32, align 4 + +; Function Attrs: nofree nosync nounwind readnone sspstrong uwtable +define dso_local i32 @fib(i32 %0) local_unnamed_addr #0 { + %2 = icmp ult i32 %0, 2 + br i1 %2, label %9, label %3 + +3: ; preds = %1 + %4 = add nsw i32 %0, -1 + %5 = call i32 @fib(i32 %4) + %6 = add nsw i32 %0, -2 + %7 = call i32 @fib(i32 %6) + %8 = add nsw i32 %7, %5 + br label %9 + +9: ; preds = %1, %3 + %10 = phi i32 [ %8, %3 ], [ 1, %1 ] + ret i32 %10 +} + +; Function Attrs: nofree nosync nounwind readnone sspstrong uwtable +define dso_local i32 @kalle(i32 %0) local_unnamed_addr #0 { + %2 = icmp slt i32 %0, 1 + br i1 %2, label %8, label %3 + +3: ; preds = %1 + %4 = icmp eq i32 %0, 1 + br i1 %4, label %8, label %5 + +5: ; preds = %3 + %6 = add nsw i32 %0, -2 + %7 = call i32 @kalle(i32 %6) #2 + br label %8 + +8: ; preds = %5, %3, %1 + %9 = phi i32 [ 0, %1 ], [ %7, %5 ], [ 1, %3 ] + ret i32 %9 +} + +; Function Attrs: nofree nosync nounwind readnone sspstrong uwtable +define dso_local i32 @anka(i32 %0) local_unnamed_addr #0 { + %2 = icmp slt i32 %0, 1 + br i1 %2, label %6, label %3 + +3: ; preds = %1 + %4 = add nsw i32 %0, -1 + %5 = call i32 @kalle(i32 %4) + br label %6 + +6: ; preds = %1, %3 + %7 = phi i32 [ %5, %3 ], [ 1, %1 ] + ret i32 %7 +} + +; Function Attrs: nofree nounwind sspstrong uwtable +define dso_local void @main() local_unnamed_addr #1 { + %1 = call i32 @fib(i32 10) + store volatile i32 %1, i32* @In, align 4, !tbaa !5 + ret void +} + +attributes #0 = { nofree nosync nounwind readnone sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #1 = { nofree nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #2 = { nounwind } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = !{!6, !6, i64 0} +!6 = !{!"int", !7, i64 0} +!7 = !{!"omnipotent char", !8, i64 0} +!8 = !{!"Simple C/C++ TBAA"} diff --git a/test/select.ll b/test/select.ll new file mode 100644 index 0000000..bbae683 --- /dev/null +++ b/test/select.ll @@ -0,0 +1,172 @@ +; ModuleID = 'select.c' +source_filename = "select.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +@arr = dso_local local_unnamed_addr global [20 x float] [float 
5.000000e+00, float 4.000000e+00, float 0x40249999A0000000, float 0x3FF19999A0000000, float 0x4016CCCCC0000000, float 1.000000e+02, float 2.310000e+02, float 1.110000e+02, float 4.950000e+01, float 9.900000e+01, float 1.000000e+01, float 1.500000e+02, float 0x406BC70A40000000, float 1.010000e+02, float 7.700000e+01, float 4.400000e+01, float 3.500000e+01, float 0x40348A3D80000000, float 0x4058FF5C20000000, float 0x408BC70A40000000], align 16 + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local float @select(i64 %0, i64 %1) local_unnamed_addr #0 { + br label %3 + +3: ; preds = %2, %79 + %4 = phi i32 [ 0, %2 ], [ %83, %79 ] + %5 = phi i64 [ %1, %2 ], [ %81, %79 ] + %6 = phi i64 [ 1, %2 ], [ %80, %79 ] + %7 = add i64 %6, 1 + %8 = icmp ugt i64 %5, %7 + br i1 %8, label %18, label %9 + +9: ; preds = %3 + %10 = icmp eq i64 %5, %7 + br i1 %10, label %11, label %79 + +11: ; preds = %9 + %12 = getelementptr inbounds [20 x float], [20 x float]* @arr, i64 0, i64 %5 + %13 = load float, float* %12, align 4, !tbaa !5 + %14 = getelementptr inbounds [20 x float], [20 x float]* @arr, i64 0, i64 %6 + %15 = load float, float* %14, align 4, !tbaa !5 + %16 = fcmp olt float %13, %15 + br i1 %16, label %17, label %79 + +17: ; preds = %11 + store float %13, float* %14, align 4, !tbaa !5 + store float %15, float* %12, align 4, !tbaa !5 + br label %79 + +18: ; preds = %3 + %19 = add i64 %5, %6 + %20 = lshr i64 %19, 1 + %21 = getelementptr inbounds [20 x float], [20 x float]* @arr, i64 0, i64 %20 + %22 = load float, float* %21, align 4, !tbaa !5 + %23 = getelementptr inbounds [20 x float], [20 x float]* @arr, i64 0, i64 %7 + %24 = load float, float* %23, align 4, !tbaa !5 + store float %24, float* %21, align 4, !tbaa !5 + store float %22, float* %23, align 4, !tbaa !5 + %25 = getelementptr inbounds [20 x float], [20 x float]* @arr, i64 0, i64 %5 + %26 = load float, float* %25, align 4, !tbaa !5 + %27 = fcmp ogt float %22, %26 + br i1 %27, label %28, label %29 + +28: ; preds = %18 + store float %26, float* %23, align 4, !tbaa !5 + store float %22, float* %25, align 4, !tbaa !5 + br label %29 + +29: ; preds = %28, %18 + %30 = getelementptr inbounds [20 x float], [20 x float]* @arr, i64 0, i64 %6 + %31 = load float, float* %30, align 4, !tbaa !5 + %32 = load float, float* %25, align 4, !tbaa !5 + %33 = fcmp ogt float %31, %32 + br i1 %33, label %34, label %35 + +34: ; preds = %29 + store float %32, float* %30, align 4, !tbaa !5 + store float %31, float* %25, align 4, !tbaa !5 + br label %35 + +35: ; preds = %34, %29 + %36 = phi float [ %31, %34 ], [ %22, %29 ] + %37 = load float, float* %23, align 4, !tbaa !5 + %38 = load float, float* %30, align 4, !tbaa !5 + %39 = fcmp ogt float %37, %38 + br i1 %39, label %40, label %41 + +40: ; preds = %35 + store float %38, float* %23, align 4, !tbaa !5 + store float %37, float* %30, align 4, !tbaa !5 + br label %41 + +41: ; preds = %40, %35 + %42 = phi float [ %37, %40 ], [ %36, %35 ] + %43 = load float, float* %30, align 4, !tbaa !5 + %44 = icmp eq i32 %4, 0 + br i1 %44, label %45, label %68 + +45: ; preds = %41, %62 + %46 = phi i32 [ %64, %62 ], [ %4, %41 ] + %47 = phi float [ %66, %62 ], [ %42, %41 ] + %48 = phi i64 [ %52, %62 ], [ %7, %41 ] + %49 = phi i64 [ %58, %62 ], [ %5, %41 ] + br label %50 + +50: ; preds = %45, %50 + %51 = phi i64 [ %52, %50 ], [ %48, %45 ] + %52 = add i64 %51, 1 + %53 = getelementptr inbounds [20 x float], [20 x float]* @arr, i64 0, i64 %52 + %54 = load float, float* %53, align 4, !tbaa !5 + %55 = fcmp olt float 
%54, %43 + br i1 %55, label %50, label %56, !llvm.loop !9 + +56: ; preds = %50, %56 + %57 = phi i64 [ %58, %56 ], [ %49, %50 ] + %58 = add i64 %57, -1 + %59 = getelementptr inbounds [20 x float], [20 x float]* @arr, i64 0, i64 %58 + %60 = load float, float* %59, align 4, !tbaa !5 + %61 = fcmp ogt float %60, %43 + br i1 %61, label %56, label %62, !llvm.loop !12 + +62: ; preds = %56 + %63 = icmp ult i64 %58, %52 + %64 = select i1 %63, i32 1, i32 %46 + %65 = icmp eq i32 %64, 0 + %66 = select i1 %65, float %54, float %47 + store float %60, float* %53, align 4, !tbaa !5 + store float %66, float* %59, align 4, !tbaa !5 + %67 = icmp eq i32 %64, 0 + br i1 %67, label %45, label %68, !llvm.loop !13 + +68: ; preds = %62, %41 + %69 = phi i64 [ %5, %41 ], [ %58, %62 ] + %70 = phi i64 [ %7, %41 ], [ %52, %62 ] + %71 = phi i32 [ %4, %41 ], [ %64, %62 ] + %72 = getelementptr inbounds [20 x float], [20 x float]* @arr, i64 0, i64 %69 + %73 = load float, float* %72, align 4, !tbaa !5 + store float %73, float* %30, align 4, !tbaa !5 + store float %43, float* %72, align 4, !tbaa !5 + %74 = icmp ult i64 %69, %0 + %75 = add i64 %69, -1 + %76 = select i1 %74, i64 %5, i64 %75 + %77 = icmp ugt i64 %69, %0 + %78 = select i1 %77, i64 %6, i64 %70 + br label %79 + +79: ; preds = %68, %9, %17, %11 + %80 = phi i64 [ %6, %11 ], [ %6, %17 ], [ %6, %9 ], [ %78, %68 ] + %81 = phi i64 [ %5, %11 ], [ %5, %17 ], [ %5, %9 ], [ %76, %68 ] + %82 = phi i1 [ false, %11 ], [ false, %17 ], [ false, %9 ], [ true, %68 ] + %83 = phi i32 [ %4, %11 ], [ %4, %17 ], [ %4, %9 ], [ %71, %68 ] + br i1 %82, label %3, label %84, !llvm.loop !14 + +84: ; preds = %79 + %85 = getelementptr inbounds [20 x float], [20 x float]* @arr, i64 0, i64 %0 + %86 = load float, float* %85, align 4, !tbaa !5 + ret float %86 +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local i32 @main() local_unnamed_addr #0 { + %1 = call float @select(i64 10, i64 20) + ret i32 0 +} + +attributes #0 = { nofree norecurse nosync nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = !{!6, !6, i64 0} +!6 = !{!"float", !7, i64 0} +!7 = !{!"omnipotent char", !8, i64 0} +!8 = !{!"Simple C/C++ TBAA"} +!9 = distinct !{!9, !10, !11} +!10 = !{!"llvm.loop.mustprogress"} +!11 = !{!"llvm.loop.unroll.disable"} +!12 = distinct !{!12, !10, !11} +!13 = distinct !{!13, !10, !11} +!14 = distinct !{!14, !10, !11} diff --git a/test/sqrt.ll b/test/sqrt.ll new file mode 100644 index 0000000..9de108f --- /dev/null +++ b/test/sqrt.ll @@ -0,0 +1,78 @@ +; ModuleID = 'sqrt.c' +source_filename = "sqrt.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone sspstrong uwtable willreturn +define dso_local float @fabs(float %0) local_unnamed_addr #0 { + %2 = fcmp olt float %0, 0.000000e+00 + %3 = fneg float %0 + %4 = select i1 %2, float %3, float %0 + ret float %4 +} + +; Function Attrs: nofree nosync nounwind readnone sspstrong uwtable +define dso_local float @sqrtfcn(float %0) local_unnamed_addr #1 
{ + %2 = fcmp oeq float %0, 0.000000e+00 + br i1 %2, label %30, label %3 + +3: ; preds = %1 + %4 = fdiv float %0, 1.000000e+01 + br label %5 + +5: ; preds = %3, %25 + %6 = phi i32 [ 0, %3 ], [ %27, %25 ] + %7 = phi i32 [ 1, %3 ], [ %28, %25 ] + %8 = phi float [ %4, %3 ], [ %26, %25 ] + %9 = icmp eq i32 %6, 0 + br i1 %9, label %10, label %25 + +10: ; preds = %5 + %11 = fmul float %8, %8 + %12 = fsub float %0, %11 + %13 = fpext float %12 to double + %14 = fpext float %8 to double + %15 = fmul double %14, 2.000000e+00 + %16 = fdiv double %13, %15 + %17 = fptrunc double %16 to float + %18 = fadd float %8, %17 + %19 = fmul float %18, %18 + %20 = fsub float %0, %19 + %21 = call float @llvm.fabs.f32(float %20) + %22 = fpext float %21 to double + %23 = fcmp ugt double %22, 1.000000e-05 + br i1 %23, label %25, label %24 + +24: ; preds = %10 + br label %25 + +25: ; preds = %24, %10, %5 + %26 = phi float [ %8, %5 ], [ %18, %24 ], [ %18, %10 ] + %27 = phi i32 [ 1, %5 ], [ 1, %24 ], [ 0, %10 ] + %28 = add nuw nsw i32 %7, 1 + %29 = icmp eq i32 %28, 20 + br i1 %29, label %30, label %5, !llvm.loop !5 + +30: ; preds = %25, %1 + %31 = phi float [ 0.000000e+00, %1 ], [ %26, %25 ] + ret float %31 +} + +; Function Attrs: nofree nosync nounwind readnone speculatable willreturn +declare float @llvm.fabs.f32(float) #2 + +attributes #0 = { mustprogress nofree norecurse nosync nounwind readnone sspstrong uwtable willreturn "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #1 = { nofree nosync nounwind readnone sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #2 = { nofree nosync nounwind readnone speculatable willreturn } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = distinct !{!5, !6, !7} +!6 = !{!"llvm.loop.mustprogress"} +!7 = !{!"llvm.loop.unroll.disable"} diff --git a/test/src/README.md b/test/src/README.md new file mode 100644 index 0000000..8b4dffc --- /dev/null +++ b/test/src/README.md @@ -0,0 +1,2 @@ +# Mälardalen WCET +Taken from [Link](https://www.mrtc.mdh.se/projects/wcet/benchmarks.html). \ No newline at end of file diff --git a/test/src/adpcm.c b/test/src/adpcm.c new file mode 100755 index 0000000..a771256 --- /dev/null +++ b/test/src/adpcm.c @@ -0,0 +1,878 @@ +/* $Id: adpcm.c,v 1.7 2005/06/15 07:27:31 ael01 Exp $ */ +/*************************************************************************/ +/* */ +/* SNU-RT Benchmark Suite for Worst Case Timing Analysis */ +/* ===================================================== */ +/* Collected and Modified by S.-S. Lim */ +/* sslim@archi.snu.ac.kr */ +/* Real-Time Research Group */ +/* Seoul National University */ +/* */ +/* */ +/* < Features > - restrictions for our experimental environment */ +/* */ +/* 1. Completely structured. */ +/* - There are no unconditional jumps. */ +/* - There are no exit from loop bodies. */ +/* (There are no 'break' or 'return' in loop bodies) */ +/* 2. No 'switch' statements. */ +/* 3. No 'do..while' statements. */ +/* 4. Expressions are restricted. 
*/ +/* - There are no multiple expressions joined by 'or', */ +/* 'and' operations. */ +/* 5. No library calls. */ +/* - All the functions needed are implemented in the */ +/* source file. */ +/* 6. Printouts removed (Jan G) */ +/* */ +/* */ +/* */ +/*************************************************************************/ +/* */ +/* FILE: adpcm.c */ +/* SOURCE : C Algorithms for Real-Time DSP by P. M. Embree */ +/* */ +/* DESCRIPTION : */ +/* */ +/* CCITT G.722 ADPCM (Adaptive Differential Pulse Code Modulation) */ +/* algorithm. */ +/* 16khz sample rate data is stored in the array test_data[SIZE]. */ +/* Results are stored in the array compressed[SIZE] and result[SIZE].*/ +/* Execution time is determined by the constant SIZE (default value */ +/* is 2000). */ +/* */ +/* REMARK : */ +/* */ +/* EXECUTION TIME : */ +/* */ +/* */ +/*************************************************************************/ + +/* To be able to run with printouts +#include */ + +/* common sampling rate for sound cards on IBM/PC */ +#define SAMPLE_RATE 11025 + +#define PI 3141 +#define SIZE 3 +#define IN_END 4 + +/* COMPLEX STRUCTURE */ + +typedef struct { + int real, imag; +} COMPLEX; + +/* function prototypes for fft and filter functions */ +void fft(COMPLEX *,int); +int fir_filter(int input,int *coef,int n,int *history); +int iir_filter(int input,int *coef,int n,int *history); +int gaussian(void); +int my_abs(int n); + +void setup_codec(int),key_down(),int_enable(),int_disable(); +int flags(int); + +int getinput(void); +void sendout(int),flush(); + +int encode(int,int); +void decode(int); +int filtez(int *bpl,int *dlt); +void upzero(int dlt,int *dlti,int *bli); +int filtep(int rlt1,int al1,int rlt2,int al2); +int quantl(int el,int detl); +/* int invqxl(int il,int detl,int *code_table,int mode); */ +int logscl(int il,int nbl); +int scalel(int nbl,int shift_constant); +int uppol2(int al1,int al2,int plt,int plt1,int plt2); +int uppol1(int al1,int apl2,int plt,int plt1); +/* int invqah(int ih,int deth); */ +int logsch(int ih,int nbh); +void reset(); +int my_fabs(int n); +int my_cos(int n); +int my_sin(int n); + +/* G722 C code */ + +/* variables for transimit quadrature mirror filter here */ +int tqmf[24]; + +/* QMF filter coefficients: +scaled by a factor of 4 compared to G722 CCITT recommendation */ +int h[24] = { + 12, -44, -44, 212, 48, -624, 128, 1448, + -840, -3220, 3804, 15504, 15504, 3804, -3220, -840, + 1448, 128, -624, 48, 212, -44, -44, 12 +}; + +int xl,xh; + +/* variables for receive quadrature mirror filter here */ +int accumc[11],accumd[11]; + +/* outputs of decode() */ +int xout1,xout2; + +int xs,xd; + +/* variables for encoder (hi and lo) here */ + +int il,szl,spl,sl,el; + +int qq4_code4_table[16] = { + 0, -20456, -12896, -8968, -6288, -4240, -2584, -1200, + 20456, 12896, 8968, 6288, 4240, 2584, 1200, 0 +}; + +int qq5_code5_table[32] = { + -280, -280, -23352, -17560, -14120, -11664, -9752, -8184, + -6864, -5712, -4696, -3784, -2960, -2208, -1520, -880, + 23352, 17560, 14120, 11664, 9752, 8184, 6864, 5712, + 4696, 3784, 2960, 2208, 1520, 880, 280, -280 +}; + +int qq6_code6_table[64] = { + -136, -136, -136, -136, -24808, -21904, -19008, -16704, +-14984, -13512, -12280, -11192, -10232, -9360, -8576, -7856, + -7192, -6576, -6000, -5456, -4944, -4464, -4008, -3576, + -3168, -2776, -2400, -2032, -1688, -1360, -1040, -728, + 24808, 21904, 19008, 16704, 14984, 13512, 12280, 11192, + 10232, 9360, 8576, 7856, 7192, 6576, 6000, 5456, + 4944, 4464, 4008, 3576, 3168, 2776, 2400, 2032, + 1688, 1360, 1040, 
728, 432, 136, -432, -136 +}; + +int delay_bpl[6]; + +int delay_dltx[6]; + +int wl_code_table[16] = { + -60, 3042, 1198, 538, 334, 172, 58, -30, + 3042, 1198, 538, 334, 172, 58, -30, -60 +}; + +int wl_table[8] = { + -60, -30, 58, 172, 334, 538, 1198, 3042 +}; + +int ilb_table[32] = { + 2048, 2093, 2139, 2186, 2233, 2282, 2332, 2383, + 2435, 2489, 2543, 2599, 2656, 2714, 2774, 2834, + 2896, 2960, 3025, 3091, 3158, 3228, 3298, 3371, + 3444, 3520, 3597, 3676, 3756, 3838, 3922, 4008 +}; + +int nbl; /* delay line */ +int al1,al2; +int plt,plt1,plt2; +int rs; +int dlt; +int rlt,rlt1,rlt2; + +/* decision levels - pre-multiplied by 8, 0 to indicate end */ +int decis_levl[30] = { + 280, 576, 880, 1200, 1520, 1864, 2208, 2584, + 2960, 3376, 3784, 4240, 4696, 5200, 5712, 6288, + 6864, 7520, 8184, 8968, 9752, 10712, 11664, 12896, + 14120, 15840, 17560, 20456, 23352, 32767 +}; + +int detl; + +/* quantization table 31 long to make quantl look-up easier, +last entry is for mil=30 case when wd is max */ +int quant26bt_pos[31] = { + 61, 60, 59, 58, 57, 56, 55, 54, + 53, 52, 51, 50, 49, 48, 47, 46, + 45, 44, 43, 42, 41, 40, 39, 38, + 37, 36, 35, 34, 33, 32, 32 +}; + +/* quantization table 31 long to make quantl look-up easier, +last entry is for mil=30 case when wd is max */ +int quant26bt_neg[31] = { + 63, 62, 31, 30, 29, 28, 27, 26, + 25, 24, 23, 22, 21, 20, 19, 18, + 17, 16, 15, 14, 13, 12, 11, 10, + 9, 8, 7, 6, 5, 4, 4 +}; + + +int deth; +int sh; /* this comes from adaptive predictor */ +int eh; + +int qq2_code2_table[4] = { + -7408, -1616, 7408, 1616 +}; + +int wh_code_table[4] = { + 798, -214, 798, -214 +}; + + +int dh,ih; +int nbh,szh; +int sph,ph,yh,rh; + +int delay_dhx[6]; + +int delay_bph[6]; + +int ah1,ah2; +int ph1,ph2; +int rh1,rh2; + +/* variables for decoder here */ +int ilr,yl,rl; +int dec_deth,dec_detl,dec_dlt; + +int dec_del_bpl[6]; + +int dec_del_dltx[6]; + +int dec_plt,dec_plt1,dec_plt2; +int dec_szl,dec_spl,dec_sl; +int dec_rlt1,dec_rlt2,dec_rlt; +int dec_al1,dec_al2; +int dl; +int dec_nbl,dec_yh,dec_dh,dec_nbh; + +/* variables used in filtez */ +int dec_del_bph[6]; + +int dec_del_dhx[6]; + +int dec_szh; +/* variables used in filtep */ +int dec_rh1,dec_rh2; +int dec_ah1,dec_ah2; +int dec_ph,dec_sph; + +int dec_sh,dec_rh; + +int dec_ph1,dec_ph2; + +/* G722 encode function two ints in, one 8 bit output */ + +/* put input samples in xin1 = first value, xin2 = second value */ +/* returns il and ih stored together */ + +/* MAX: 1 */ +int my_abs(int n) +{ + int m; + + if (n >= 0) m = n; + else m = -n; + return m; +} + +/* MAX: 1 */ +int my_fabs(int n) +{ + int f; + + if (n >= 0) f = n; + else f = -n; + return f; +} + +int my_sin(int rad) +{ + int diff; + int app=0; + + int inc = 1; + + /* MAX dependent on rad's value, say 50 */ + while (rad > 2*PI) + rad -= 2*PI; + /* MAX dependent on rad's value, say 50 */ + while (rad < -2*PI) + rad += 2*PI; + diff = rad; + app = diff; + diff = (diff * (-(rad*rad))) / + ((2 * inc) * (2 * inc + 1)); + app = app + diff; + inc++; + /* REALLY: while(my_fabs(diff) >= 0.00001) { */ + /* MAX: 1000 */ + while(my_fabs(diff) >= 1) { + diff = (diff * (-(rad*rad))) / + ((2 * inc) * (2 * inc + 1)); + app = app + diff; + inc++; + } + + return app; +} + + +int my_cos(int rad) +{ + return (my_sin (PI / 2 - rad)); +} + + +/* MAX: 1 */ +int encode(int xin1,int xin2) +{ + int i; + int *h_ptr,*tqmf_ptr,*tqmf_ptr1; + long int xa,xb; + int decis; + +/* transmit quadrature mirror filters implemented here */ + h_ptr = h; + tqmf_ptr = tqmf; + xa = (long)(*tqmf_ptr++) * (*h_ptr++); 
+ xb = (long)(*tqmf_ptr++) * (*h_ptr++); +/* main multiply accumulate loop for samples and coefficients */ + /* MAX: 10 */ + for(i = 0 ; i < 10 ; i++) { + xa += (long)(*tqmf_ptr++) * (*h_ptr++); + xb += (long)(*tqmf_ptr++) * (*h_ptr++); + } +/* final mult/accumulate */ + xa += (long)(*tqmf_ptr++) * (*h_ptr++); + xb += (long)(*tqmf_ptr) * (*h_ptr++); + +/* update delay line tqmf */ + tqmf_ptr1 = tqmf_ptr - 2; + /* MAX: 22 */ + for(i = 0 ; i < 22 ; i++) *tqmf_ptr-- = *tqmf_ptr1--; + *tqmf_ptr-- = xin1; + *tqmf_ptr = xin2; + +/* scale outputs */ + xl = (xa + xb) >> 15; + xh = (xa - xb) >> 15; + +/* end of quadrature mirror filter code */ + +/* starting with lower sub band encoder */ + +/* filtez - compute predictor output section - zero section */ + szl = filtez(delay_bpl,delay_dltx); + +/* filtep - compute predictor output signal (pole section) */ + spl = filtep(rlt1,al1,rlt2,al2); + +/* compute the predictor output value in the lower sub_band encoder */ + sl = szl + spl; + el = xl - sl; + +/* quantl: quantize the difference signal */ + il = quantl(el,detl); + +/* invqxl: computes quantized difference signal */ +/* for invqbl, truncate by 2 lsbs, so mode = 3 */ + dlt = ((long)detl*qq4_code4_table[il >> 2]) >> 15; + +/* logscl: updates logarithmic quant. scale factor in low sub band */ + nbl = logscl(il,nbl); + +/* scalel: compute the quantizer scale factor in the lower sub band */ +/* calling parameters nbl and 8 (constant such that scalel can be scaleh) */ + detl = scalel(nbl,8); + +/* parrec - simple addition to compute recontructed signal for adaptive pred */ + plt = dlt + szl; + +/* upzero: update zero section predictor coefficients (sixth order)*/ +/* calling parameters: dlt, dlt1, dlt2, ..., dlt6 from dlt */ +/* bpli (linear_buffer in which all six values are delayed */ +/* return params: updated bpli, delayed dltx */ + upzero(dlt,delay_dltx,delay_bpl); + +/* uppol2- update second predictor coefficient apl2 and delay it as al2 */ +/* calling parameters: al1, al2, plt, plt1, plt2 */ + al2 = uppol2(al1,al2,plt,plt1,plt2); + +/* uppol1 :update first predictor coefficient apl1 and delay it as al1 */ +/* calling parameters: al1, apl2, plt, plt1 */ + al1 = uppol1(al1,al2,plt,plt1); + +/* recons : compute recontructed signal for adaptive predictor */ + rlt = sl + dlt; + +/* done with lower sub_band encoder; now implement delays for next time*/ + rlt2 = rlt1; + rlt1 = rlt; + plt2 = plt1; + plt1 = plt; + +/* high band encode */ + + szh = filtez(delay_bph,delay_dhx); + + sph = filtep(rh1,ah1,rh2,ah2); + +/* predic: sh = sph + szh */ + sh = sph + szh; +/* subtra: eh = xh - sh */ + eh = xh - sh; + +/* quanth - quantization of difference signal for higher sub-band */ +/* quanth: in-place for speed params: eh, deth (has init. value) */ + if(eh >= 0) { + ih = 3; /* 2,3 are pos codes */ + } + else { + ih = 1; /* 0,1 are neg codes */ + } + decis = (564L*(long)deth) >> 12L; + if(my_abs(eh) > decis) ih--; /* mih = 2 case */ + +/* invqah: compute the quantized difference signal, higher sub-band*/ + dh = ((long)deth*qq2_code2_table[ih]) >> 15L ; + +/* logsch: update logarithmic quantizer scale factor in hi sub-band*/ + nbh = logsch(ih,nbh); + +/* note : scalel and scaleh use same code, different parameters */ + deth = scalel(nbh,10); + +/* parrec - add pole predictor output to quantized diff. 
signal */ + ph = dh + szh; + +/* upzero: update zero section predictor coefficients (sixth order) */ +/* calling parameters: dh, dhi, bphi */ +/* return params: updated bphi, delayed dhx */ + upzero(dh,delay_dhx,delay_bph); + +/* uppol2: update second predictor coef aph2 and delay as ah2 */ +/* calling params: ah1, ah2, ph, ph1, ph2 */ + ah2 = uppol2(ah1,ah2,ph,ph1,ph2); + +/* uppol1: update first predictor coef. aph2 and delay it as ah1 */ + ah1 = uppol1(ah1,ah2,ph,ph1); + +/* recons for higher sub-band */ + yh = sh + dh; + +/* done with higher sub-band encoder, now Delay for next time */ + rh2 = rh1; + rh1 = yh; + ph2 = ph1; + ph1 = ph; + +/* multiplex ih and il to get signals together */ + return(il | (ih << 6)); +} + +/* decode function, result in xout1 and xout2 */ + +void decode(int input) +{ + int i; + long int xa1,xa2; /* qmf accumulators */ + int *h_ptr,*ac_ptr,*ac_ptr1,*ad_ptr,*ad_ptr1; + +/* split transmitted word from input into ilr and ih */ + ilr = input & 0x3f; + ih = input >> 6; + +/* LOWER SUB_BAND DECODER */ + +/* filtez: compute predictor output for zero section */ + dec_szl = filtez(dec_del_bpl,dec_del_dltx); + +/* filtep: compute predictor output signal for pole section */ + dec_spl = filtep(dec_rlt1,dec_al1,dec_rlt2,dec_al2); + + dec_sl = dec_spl + dec_szl; + +/* invqxl: compute quantized difference signal for adaptive predic */ + dec_dlt = ((long)dec_detl*qq4_code4_table[ilr >> 2]) >> 15; + +/* invqxl: compute quantized difference signal for decoder output */ + dl = ((long)dec_detl*qq6_code6_table[il]) >> 15; + + rl = dl + dec_sl; + +/* logscl: quantizer scale factor adaptation in the lower sub-band */ + dec_nbl = logscl(ilr,dec_nbl); + +/* scalel: computes quantizer scale factor in the lower sub band */ + dec_detl = scalel(dec_nbl,8); + +/* parrec - add pole predictor output to quantized diff. signal */ +/* for partially reconstructed signal */ + dec_plt = dec_dlt + dec_szl; + +/* upzero: update zero section predictor coefficients */ + upzero(dec_dlt,dec_del_dltx,dec_del_bpl); + +/* uppol2: update second predictor coefficient apl2 and delay it as al2 */ + dec_al2 = uppol2(dec_al1,dec_al2,dec_plt,dec_plt1,dec_plt2); + +/* uppol1: update first predictor coef. 
(pole setion) */ + dec_al1 = uppol1(dec_al1,dec_al2,dec_plt,dec_plt1); + +/* recons : compute recontructed signal for adaptive predictor */ + dec_rlt = dec_sl + dec_dlt; + +/* done with lower sub band decoder, implement delays for next time */ + dec_rlt2 = dec_rlt1; + dec_rlt1 = dec_rlt; + dec_plt2 = dec_plt1; + dec_plt1 = dec_plt; + +/* HIGH SUB-BAND DECODER */ + +/* filtez: compute predictor output for zero section */ + dec_szh = filtez(dec_del_bph,dec_del_dhx); + +/* filtep: compute predictor output signal for pole section */ + dec_sph = filtep(dec_rh1,dec_ah1,dec_rh2,dec_ah2); + +/* predic:compute the predictor output value in the higher sub_band decoder */ + dec_sh = dec_sph + dec_szh; + +/* invqah: in-place compute the quantized difference signal */ + dec_dh = ((long)dec_deth*qq2_code2_table[ih]) >> 15L ; + +/* logsch: update logarithmic quantizer scale factor in hi sub band */ + dec_nbh = logsch(ih,dec_nbh); + +/* scalel: compute the quantizer scale factor in the higher sub band */ + dec_deth = scalel(dec_nbh,10); + +/* parrec: compute partially recontructed signal */ + dec_ph = dec_dh + dec_szh; + +/* upzero: update zero section predictor coefficients */ + upzero(dec_dh,dec_del_dhx,dec_del_bph); + +/* uppol2: update second predictor coefficient aph2 and delay it as ah2 */ + dec_ah2 = uppol2(dec_ah1,dec_ah2,dec_ph,dec_ph1,dec_ph2); + +/* uppol1: update first predictor coef. (pole setion) */ + dec_ah1 = uppol1(dec_ah1,dec_ah2,dec_ph,dec_ph1); + +/* recons : compute recontructed signal for adaptive predictor */ + rh = dec_sh + dec_dh; + +/* done with high band decode, implementing delays for next time here */ + dec_rh2 = dec_rh1; + dec_rh1 = rh; + dec_ph2 = dec_ph1; + dec_ph1 = dec_ph; + +/* end of higher sub_band decoder */ + +/* end with receive quadrature mirror filters */ + xd = rl - rh; + xs = rl + rh; + +/* receive quadrature mirror filters implemented here */ + h_ptr = h; + ac_ptr = accumc; + ad_ptr = accumd; + xa1 = (long)xd * (*h_ptr++); + xa2 = (long)xs * (*h_ptr++); +/* main multiply accumulate loop for samples and coefficients */ + for(i = 0 ; i < 10 ; i++) { + xa1 += (long)(*ac_ptr++) * (*h_ptr++); + xa2 += (long)(*ad_ptr++) * (*h_ptr++); + } +/* final mult/accumulate */ + xa1 += (long)(*ac_ptr) * (*h_ptr++); + xa2 += (long)(*ad_ptr) * (*h_ptr++); + +/* scale by 2^14 */ + xout1 = xa1 >> 14; + xout2 = xa2 >> 14; + +/* update delay lines */ + ac_ptr1 = ac_ptr - 1; + ad_ptr1 = ad_ptr - 1; + for(i = 0 ; i < 10 ; i++) { + *ac_ptr-- = *ac_ptr1--; + *ad_ptr-- = *ad_ptr1--; + } + *ac_ptr = xd; + *ad_ptr = xs; + + return; +} + +/* clear all storage locations */ + +void reset() +{ + int i; + + detl = dec_detl = 32; /* reset to min scale factor */ + deth = dec_deth = 8; + nbl = al1 = al2 = plt1 = plt2 = rlt1 = rlt2 = 0; + nbh = ah1 = ah2 = ph1 = ph2 = rh1 = rh2 = 0; + dec_nbl = dec_al1 = dec_al2 = dec_plt1 = dec_plt2 = dec_rlt1 = dec_rlt2 = 0; + dec_nbh = dec_ah1 = dec_ah2 = dec_ph1 = dec_ph2 = dec_rh1 = dec_rh2 = 0; + + for(i = 0 ; i < 6 ; i++) { + delay_dltx[i] = 0; + delay_dhx[i] = 0; + dec_del_dltx[i] = 0; + dec_del_dhx[i] = 0; + } + + for(i = 0 ; i < 6 ; i++) { + delay_bpl[i] = 0; + delay_bph[i] = 0; + dec_del_bpl[i] = 0; + dec_del_bph[i] = 0; + } + + for(i = 0 ; i < 23 ; i++) tqmf[i] = 0; + + for(i = 0 ; i < 11 ; i++) { + accumc[i] = 0; + accumd[i] = 0; + } + return; +} + +/* filtez - compute predictor output signal (zero section) */ +/* input: bpl1-6 and dlt1-6, output: szl */ + +int filtez(int *bpl,int *dlt) +{ + int i; + long int zl; + zl = (long)(*bpl++) * (*dlt++); + /* 
MAX: 6 */ + for(i = 1 ; i < 6 ; i++) + zl += (long)(*bpl++) * (*dlt++); + + return((int)(zl >> 14)); /* x2 here */ +} + +/* filtep - compute predictor output signal (pole section) */ +/* input rlt1-2 and al1-2, output spl */ + +int filtep(int rlt1,int al1,int rlt2,int al2) +{ + long int pl,pl2; + pl = 2*rlt1; + pl = (long)al1*pl; + pl2 = 2*rlt2; + pl += (long)al2*pl2; + return((int)(pl >> 15)); +} + +/* quantl - quantize the difference signal in the lower sub-band */ +int quantl(int el,int detl) +{ + int ril,mil; + long int wd,decis; + +/* abs of difference signal */ + wd = my_abs(el); +/* determine mil based on decision levels and detl gain */ + /* MAX: 30 */ + for(mil = 0 ; mil < 30 ; mil++) { + decis = (decis_levl[mil]*(long)detl) >> 15L; + if(wd <= decis) break; + } +/* if mil=30 then wd is less than all decision levels */ + if(el >= 0) ril = quant26bt_pos[mil]; + else ril = quant26bt_neg[mil]; + return(ril); +} + +/* invqxl is either invqbl or invqal depending on parameters passed */ +/* returns dlt, code table is pre-multiplied by 8 */ + +/* int invqxl(int il,int detl,int *code_table,int mode) */ +/* { */ +/* long int dlt; */ +/* dlt = (long)detl*code_table[il >> (mode-1)]; */ +/* return((int)(dlt >> 15)); */ +/* } */ + +/* logscl - update log quantizer scale factor in lower sub-band */ +/* note that nbl is passed and returned */ + +int logscl(int il,int nbl) +{ + long int wd; + wd = ((long)nbl * 127L) >> 7L; /* leak factor 127/128 */ + nbl = (int)wd + wl_code_table[il >> 2]; + if(nbl < 0) nbl = 0; + if(nbl > 18432) nbl = 18432; + return(nbl); +} + +/* scalel: compute quantizer scale factor in lower or upper sub-band*/ + +int scalel(int nbl,int shift_constant) +{ + int wd1,wd2,wd3; + wd1 = (nbl >> 6) & 31; + wd2 = nbl >> 11; + wd3 = ilb_table[wd1] >> (shift_constant + 1 - wd2); + return(wd3 << 3); +} + +/* upzero - inputs: dlt, dlti[0-5], bli[0-5], outputs: updated bli[0-5] */ +/* also implements delay of bli and update of dlti from dlt */ + +void upzero(int dlt,int *dlti,int *bli) +{ + int i,wd2,wd3; +/*if dlt is zero, then no sum into bli */ + if(dlt == 0) { + for(i = 0 ; i < 6 ; i++) { + bli[i] = (int)((255L*bli[i]) >> 8L); /* leak factor of 255/256 */ + } + } + else { + for(i = 0 ; i < 6 ; i++) { + if((long)dlt*dlti[i] >= 0) wd2 = 128; else wd2 = -128; + wd3 = (int)((255L*bli[i]) >> 8L); /* leak factor of 255/256 */ + bli[i] = wd2 + wd3; + } + } +/* implement delay line for dlt */ + dlti[5] = dlti[4]; + dlti[4] = dlti[3]; + dlti[3] = dlti[2]; + dlti[1] = dlti[0]; + dlti[0] = dlt; + return; +} + +/* uppol2 - update second predictor coefficient (pole section) */ +/* inputs: al1, al2, plt, plt1, plt2. outputs: apl2 */ + +int uppol2(int al1,int al2,int plt,int plt1,int plt2) +{ + long int wd2,wd4; + int apl2; + wd2 = 4L*(long)al1; + if((long)plt*plt1 >= 0L) wd2 = -wd2; /* check same sign */ + wd2 = wd2 >> 7; /* gain of 1/128 */ + if((long)plt*plt2 >= 0L) { + wd4 = wd2 + 128; /* same sign case */ + } + else { + wd4 = wd2 - 128; + } + apl2 = wd4 + (127L*(long)al2 >> 7L); /* leak factor of 127/128 */ + +/* apl2 is limited to +-.75 */ + if(apl2 > 12288) apl2 = 12288; + if(apl2 < -12288) apl2 = -12288; + return(apl2); +} + +/* uppol1 - update first predictor coefficient (pole section) */ +/* inputs: al1, apl2, plt, plt1. 
outputs: apl1 */ + +int uppol1(int al1,int apl2,int plt,int plt1) +{ + long int wd2; + int wd3,apl1; + wd2 = ((long)al1*255L) >> 8L; /* leak factor of 255/256 */ + if((long)plt*plt1 >= 0L) { + apl1 = (int)wd2 + 192; /* same sign case */ + } + else { + apl1 = (int)wd2 - 192; + } +/* note: wd3= .9375-.75 is always positive */ + wd3 = 15360 - apl2; /* limit value */ + if(apl1 > wd3) apl1 = wd3; + if(apl1 < -wd3) apl1 = -wd3; + return(apl1); +} + +/* INVQAH: inverse adaptive quantizer for the higher sub-band */ +/* returns dh, code table is pre-multiplied by 8 */ + +/* int invqah(int ih,int deth) */ +/* { */ +/* long int rdh; */ +/* rdh = ((long)deth*qq2_code2_table[ih]) >> 15L ; */ +/* return((int)(rdh )); */ +/* } */ + +/* logsch - update log quantizer scale factor in higher sub-band */ +/* note that nbh is passed and returned */ + +int logsch(int ih,int nbh) +{ + int wd; + wd = ((long)nbh * 127L) >> 7L; /* leak factor 127/128 */ + nbh = wd + wh_code_table[ih]; + if(nbh < 0) nbh = 0; + if(nbh > 22528) nbh = 22528; + return(nbh); +} + + +#ifndef Seoul_Mate +int main() +{ + int i,j,f/*,answer*/; + static int test_data[SIZE*2],compressed[SIZE],result[SIZE*2]; + +/* reset, initialize required memory */ + reset(); + +/* read in amplitude and frequency for test data */ + /* scanf("%d",&j); + scanf("%d",&f); */ + j = 10; f = 2000; /* executed, but not used */ + +/* 16 KHz sample rate */ + /* XXmain_0, MAX: 2 */ + /* Since the number of times we loop in my_sin depends on the argument we + add the fact: xxmain_0:[]: */ + for(i = 0 ; i < SIZE ; i++) { + test_data[i] = (int)j*my_cos(f*PI*i); + } + + + + /* MAX: 2 */ + +/*******Assumes that test_data[0] = 10 and test_data[1]=-6 from above, ******* + and that the call in the for loop becomes encode(test_data[0],test_data[0]); + and encode(test_data[1],test_data[1]), since it would otherwise run + *******past the end of the array *******/ + + + for(i = 0 ; i < IN_END ; i += 2) + compressed[i/2] = encode(test_data[i],test_data[i+1]); + /* MAX: 2 */ + for(i = 0 ; i < IN_END ; i += 2) { + decode(compressed[i/2]); + result[i] = xout1; + result[i+1] = xout2; + } +/* +for( ; j < 32767 ; j++) { +i=IN_END-1; +printf("\n%4d %4d %4d %4d %4d",j,compressed[i/2] >> 6,compressed[i/2] & 63,result[i],result[i-1]); +} +*/ +/* print ih, il */ +/* + for(i = 0 ; i < IN_END/2 ; i++) printf("\n%4d %2d %2d", + i,compressed[i] >> 6,compressed[i] & 63); +*/ + + return result[i]+result[i+1]; +} +#endif + + + diff --git a/test/src/bs.c b/test/src/bs.c new file mode 100755 index 0000000..9f247fb --- /dev/null +++ b/test/src/bs.c @@ -0,0 +1,113 @@ +/*************************************************************************/ +/* */ +/* SNU-RT Benchmark Suite for Worst Case Timing Analysis */ +/* ===================================================== */ +/* Collected and Modified by S.-S. Lim */ +/* sslim@archi.snu.ac.kr */ +/* Real-Time Research Group */ +/* Seoul National University */ +/* */ +/* */ +/* < Features > - restrictions for our experimental environment */ +/* */ +/* 1. Completely structured. */ +/* - There are no unconditional jumps. */ +/* - There are no exit from loop bodies. */ +/* (There are no 'break' or 'return' in loop bodies) */ +/* 2. No 'switch' statements. */ +/* 3. No 'do..while' statements. */ +/* 4. Expressions are restricted. */ +/* - There are no multiple expressions joined by 'or', */ +/* 'and' operations. */ +/* 5. No library calls. */ +/* - All the functions needed are implemented in the */ +/* source file. 
*/ +/* */ +/* */ +/*************************************************************************/ +/* */ +/* FILE: bs.c */ +/* SOURCE : Public Domain Code */ +/* */ +/* DESCRIPTION : */ +/* */ +/* Binary search for the array of 15 integer elements. */ +/* */ +/* REMARK : */ +/* */ +/* EXECUTION TIME : */ +/* */ +/* */ +/*************************************************************************/ + + + + +struct DATA { + int key; + int value; +} ; + +#ifdef DEBUG + int cnt1; +#endif + +struct DATA data[15] = { {1, 100}, + {5,200}, + {6, 300}, + {7, 700}, + {8, 900}, + {9, 250}, + {10, 400}, + {11, 600}, + {12, 800}, + {13, 1500}, + {14, 1200}, + {15, 110}, + {16, 140}, + {17, 133}, + {18, 10} }; + +main() +{ + binary_search(8); +} + +binary_search(x) +{ + int fvalue, mid, up, low ; + + low = 0; + up = 14; + fvalue = -1 /* all data are positive */ ; + while (low <= up) { + mid = (low + up) >> 1; + if ( data[mid].key == x ) { /* found */ + up = low - 1; + fvalue = data[mid].value; +#ifdef DEBUG + printf("FOUND!!\n"); +#endif + } + else /* not found */ + if ( data[mid].key > x ) { + up = mid - 1; +#ifdef DEBUG + printf("MID-1\n"); +#endif + } + else { + low = mid + 1; +#ifdef DEBUG + printf("MID+1\n"); +#endif + } +#ifdef DEBUG + cnt1++; +#endif + } +#ifdef DEBUG + printf("Loop Count : %d\n", cnt1); +#endif + return fvalue; +} diff --git a/test/src/bsort100.c b/test/src/bsort100.c new file mode 100755 index 0000000..8df4c0c --- /dev/null +++ b/test/src/bsort100.c @@ -0,0 +1,127 @@ +/* bsort100.c */ + +/* All output disabled for wcsim */ +#define WCSIM 1 + +/* A read from this address will result in an known value of 1 */ +#define KNOWN_VALUE (int)(*((char *)0x80200001)) + +/* A read from this address will result in an unknown value */ +#define UNKNOWN_VALUE (int)(*((char *)0x80200003)) + + +#include +#include +#include + +#define WORSTCASE 1 +#define FALSE 0 +#define TRUE 1 +#define NUMELEMS 100 +#define MAXDIM (NUMELEMS+1) + +/* BUBBLESORT BENCHMARK PROGRAM: + * This program tests the basic loop constructs, integer comparisons, + * and simple array handling of compilers by sorting 10 arrays of + * randomly generated integers. + */ + +int Array[MAXDIM], Seed; +int factor; + +main() +{ + long StartTime, StopTime; + float TotalTime; + +#ifndef WCSIM + printf("\n *** BUBBLE SORT BENCHMARK TEST ***\n\n"); + printf("RESULTS OF TEST:\n\n"); +#endif + Initialize(Array); + /* StartTime = ttime (); */ + BubbleSort(Array); + /* StopTime = ttime(); */ + /* TotalTime = (StopTime - StartTime) / 1000.0; */ +#ifndef WCSIM + printf(" - Number of elements sorted is %d\n", NUMELEMS); + printf(" - Total time sorting is %3.3f seconds\n\n", TotalTime); +#endif +} + + +int ttime() +/* + * This function returns in milliseconds the amount of compiler time + * used prior to it being called. + */ +{ + struct tms buffer; + int utime; + + /* times(&buffer); not implemented */ + utime = (buffer.tms_utime / 60.0) * 1000.0; + return(utime); +} + + +Initialize(Array) +int Array[]; +/* + * Initializes given array with randomly generated integers. + */ +{ + int Index, fact; + +#ifdef WORSTCASE + factor = -1; +#else + factor = 1; +#endif + +fact = factor; +for (Index = 1; Index <= NUMELEMS; Index ++) + Array[Index] = Index*fact * KNOWN_VALUE; +} + + + +BubbleSort(Array) +int Array[]; +/* + * Sorts an array of integers of size NUMELEMS in ascending order. 
+ */ +{ + int Sorted = FALSE; + int Temp, LastIndex, Index, i; + + for (i = 1; + i <= NUMELEMS-1; /* apsim_loop 1 0 */ + i++) + { + Sorted = TRUE; + for (Index = 1; + Index <= NUMELEMS-1; /* apsim_loop 10 1 */ + Index ++) { + if (Index > NUMELEMS-i) + break; + if (Array[Index] > Array[Index + 1]) + { + Temp = Array[Index]; + Array[Index] = Array[Index+1]; + Array[Index+1] = Temp; + Sorted = FALSE; + } + } + + if (Sorted) + break; + } + +#ifndef WCSIM + if (Sorted || i == 1) + fprintf(stderr, "array was successfully sorted in %d passes\n", i-1); + else + fprintf(stderr, "array was unsuccessfully sorted in %d passes\n", i-1); +#endif +} diff --git a/test/src/cnt.c b/test/src/cnt.c new file mode 100755 index 0000000..dfec627 --- /dev/null +++ b/test/src/cnt.c @@ -0,0 +1,133 @@ +/* $Id: cnt.c,v 1.3 2005/04/04 11:34:58 csg Exp $ */ + +/* sumcntmatrix.c */ + +//#include +//#include + +// #define WORSTCASE 1 +// #define MAXSIZE 100 Changed JG/Ebbe +#define MAXSIZE 10 + +// Typedefs +typedef int matrix [MAXSIZE][MAXSIZE]; + +// Forwards declarations +int main(void); +int Test(matrix); +int Initialize(matrix); +int InitSeed(void); +void Sum(matrix); +int RandomInteger(void); + +// Globals +int Seed; +matrix Array; +int Postotal, Negtotal, Poscnt, Negcnt; + +// The main function +int main (void) +{ + InitSeed(); + //printf("\n *** MATRIX SUM AND COUNT BENCHMARK TEST ***\n\n"); + //printf("RESULTS OF THE TEST:\n"); + Test(Array); + return 1; +} + + +int Test(matrix Array) +{ + long StartTime, StopTime; + float TotalTime; + + Initialize(Array); + StartTime = 1000.0; //ttime(); + Sum(Array); + StopTime = 1500.0; //ttime(); + + TotalTime = (StopTime - StartTime) / 1000.0; + + //printf(" - Size of array is %d\n", MAXSIZE); + //printf(" - Num pos was %d and Sum was %d\n", Poscnt, Postotal); + //printf(" - Num neg was %d and Sum was %d\n", Negcnt, Negtotal); + //printf(" - Num neg was %d\n", Negcnt); + //printf(" - Total sum time is %3.3f seconds\n\n", TotalTime); + return 0; +} + + +// Intializes the given array with random integers. +int Initialize(matrix Array) +{ + register int OuterIndex, InnerIndex; + + for (OuterIndex = 0; OuterIndex < MAXSIZE; OuterIndex++) //100 + 1 + for (InnerIndex = 0; InnerIndex < MAXSIZE; InnerIndex++) //100 + 1 + Array[OuterIndex][InnerIndex] = RandomInteger(); + + return 0; +} + + +// Initializes the seed used in the random number generator. 
+int InitSeed (void) +{ + Seed = 0; + return 0; +} + +void Sum(matrix Array) +{ + register int Outer, Inner; + + int Ptotal = 0; /* changed these to locals in order to drive worst case */ + int Ntotal = 0; + int Pcnt = 0; + int Ncnt = 0; + + for (Outer = 0; Outer < MAXSIZE; Outer++) //Maxsize = 100 + for (Inner = 0; Inner < MAXSIZE; Inner++) +#ifdef WORSTCASE + if (Array[Outer][Inner] >= 0) { +#else + if (Array[Outer][Inner] < 0) { +#endif + Ptotal += Array[Outer][Inner]; + Pcnt++; + } + else { + Ntotal += Array[Outer][Inner]; + Ncnt++; + } + + Postotal = Ptotal; + Poscnt = Pcnt; + Negtotal = Ntotal; + Negcnt = Ncnt; +} + + +// This function returns in milliseconds the amount of compiler time +//int ttime() +//{ +// struct tms buffer; +//int utime; + +//times(&buffer); +//utime = (buffer.tms_utime / 60.0) * 1000.0; +//return (utime); +//} + + +// Generates random integers between 0 and 8095 +int RandomInteger(void) +{ + Seed = ((Seed * 133) + 81) % 8095; + return Seed; +} + + + + + diff --git a/test/src/compress.c b/test/src/compress.c new file mode 100755 index 0000000..b62a559 --- /dev/null +++ b/test/src/compress.c @@ -0,0 +1,521 @@ +/* MDH WCET BENCHMARK SUITE. File version $Id: compress.c,v 1.7 2005/12/21 09:37:18 jgn Exp $ */ + +/* + * Compress - data compression program + * + * Adopted from SPEC95 for WCET-calculation by Thomas Lundqvist, 1997-11-28. + * Only compression is done on a buffer (small one) containing + * totally random data. This should come closer to a worst case + * compared to the original SPEC95-version. + * + * All unused code removed by Jakob Engblom, february 2000. Cleaned + * up for IAR compilation. + * + * Removed the prototype declaration of "code_int getcode();" that is + * niether defined nor used. Christer Sandberg + * + * Changes: + * JG 2005/12/20: Changed declaration of maxmaxcode to avoid warning + * JG 2012/09/28: Comment within comment removed + */ + +/* #define DO_TRACING */ + +#ifdef DO_TRACING /* ON PC */ + +#include +#define TRACE(x) trace((x)) +#undef TEST /* finished testing! */ + +/* +void trace(char *s) +{ + printf("%s\n",s); +} +*/ + +#else /* ON TARGET */ + +#define TRACE(x) +#undef TEST + +#endif + + +#define BUFFERSIZE 50 +#define IN_COUNT BUFFERSIZE + +#define HSIZE 257 /* 95% occupancy */ +#define BITS 16 +#define INIT_BITS 9 /* initial number of bits/code */ + + + +/*----------------------------------------------------------------------*/ +/*----------------------------------------------------------------------*/ +/*----------------------------------------------------------------------*/ + +#define min(a,b) ((a>b) ? b : a) + +/* + * Set USERMEM to the maximum amount of physical user memory available + * in bytes. USERMEM is used to determine the maximum BITS that can be used + * for compression. + * + * SACREDMEM is the amount of physical memory saved for others; compress + * will hog the rest. + */ +/* For SPEC95 use, SACREDMEM automatically set to 0. + Jeff Reilly, 1/15/95 */ + +#define SACREDMEM 0 + +/* For SPEC95 use, USERMEM automatically set to 450000. + Jeff Reilly, 1/15/95 */ +# define USERMEM 450000 /* default user memory */ + +#ifdef interdata /* (Perkin-Elmer) */ +#define SIGNED_COMPARE_SLOW /* signed compare is slower than unsigned */ +#endif + +/* For SPEC95 use, PBITS and BITS automatically set to 16. 
+ Jeff Reilyy, 1/15/95 */ +#define PBITS 16 +#define BITS 16 +#define HSIZE 257 /* 95% occupancy was 69001 */ + + +/* + * a code_int must be able to hold 2**BITS values of type int, and also -1 + */ +#if BITS > 15 +typedef long int code_int; +#else +typedef int code_int; +#endif + +#ifdef SIGNED_COMPARE_SLOW +typedef unsigned long int count_int; +typedef unsigned short int count_short; +#else +typedef long int count_int; +#endif + +typedef unsigned char char_type; + +/* Defines for third byte of header */ +#define BIT_MASK 0x1f +#define BLOCK_MASK 0x80 +/* Masks 0x40 and 0x20 are free. I think 0x20 should mean that there is + a fourth header byte (for expansion). +*/ + +/* Global variables */ +int n_bits; /* number of bits/code */ +int maxbits = BITS; /* user settable max # bits/code */ +code_int maxcode; /* maximum code, given n_bits */ +#if BITS > 15 +code_int maxmaxcode = 1L << BITS; /* should NEVER generate this code */ +#else +code_int maxmaxcode = 1 << BITS; /* should NEVER generate this code */ +#endif + +# define MAXCODE(n_bits) ((1 << (n_bits)) - 1) + + +#define htabof(i) htab[i] +#define codetabof(i) codetab[i] + +code_int hsize = HSIZE; /* for dynamic table sizing */ +count_int fsize; + +/* + * To save much memory, we overlay the table used by compress() with those + * used by decompress(). The tab_prefix table is the same size and type + * as the codetab. The tab_suffix table needs 2**BITS characters. We + * get this from the beginning of htab. The output stack uses the rest + * of htab, and contains characters. There is plenty of room for any + * possible stack (stack used to be 8000 characters). + */ + +#define tab_prefixof(i) codetabof(i) +#define tab_suffixof(i) ((char_type *)(htab))[i] +#define de_stack ((char_type *)&tab_suffixof(1< BITS) maxbits = BITS;*/ + maxbits = BITS; + maxmaxcode = 1 << maxbits; + + InCnt = count; + apsim_InCnt = IN_COUNT + 3; + InBuff = (unsigned char *)orig_text_buffer; + OutBuff = (unsigned char *)comp_text_buffer; + + compress(); + + return (0); + +} + + + +void initbuffer(void) +{ + int seed = 1; + int i; + int tabort; + + for (i = 0 ; i < BUFFERSIZE ; i++) { + /* Generates random integers between 0 and 8095 */ + tabort = i; + seed = ((seed * 133) + 81) % 8095; + + orig_text_buffer[i] = seed % 256; + } +} + + + +static int offset; +long int in_count = 1; /* length of input */ +long int bytes_out; /* length of compressed output */ +long int out_count = 0; /* # of codes output (for debugging) */ + + +void compress(void) +{ + register long fcode; + register code_int i = 0; + register int c; + register code_int ent; + register int disp; + register code_int hsize_reg; + register int hshift; + + + offset = 0; + bytes_out = 3; /* includes 3-byte header mojo */ + out_count = 0; + clear_flg = 0; + ratio = 0; + in_count = 1; + checkpoint = CHECK_GAP; + maxcode = MAXCODE(n_bits = INIT_BITS); + free_ent = ((block_compress) ? 
(FIRST) : (256) ); + + ent = getbyte (); + + hshift = 0; + for ( fcode = (long) hsize; fcode < 65536L; fcode *= 2L ) + { + hshift++; + } + + hshift = 8 - hshift; /* set hash code range bound */ + + hsize_reg = hsize; + cl_hash( (count_int) hsize_reg); /* clear hash table */ + + + while ( InCnt > 0 ) /* apsim_loop 11 0 */ + { + int apsim_bound111 = 0; + + c = getbyte(); /* decrements InCnt */ + + in_count++; + fcode = (long) (((long) c << maxbits) + ent); + i = ((c << hshift) ^ ent); /* xor hashing */ + + if ( htabof (i) == fcode ) { + ent = codetabof (i); + continue; + } else if ( (long)htabof (i) < 0 ) { /* empty slot */ + goto nomatch; + } + + + disp = hsize_reg - i; /* secondary hash (after G. Knott) */ + if ( i == 0 ) { + disp = 1; + } + +probe: + + if ( (i -= disp) < 0 ) { /* apsim_loop 111 11 */ + i += hsize_reg; + } + + if ( htabof (i) == fcode ) { + ent = codetabof (i); + continue; + } + + if ( (long)htabof (i) > 0 && (++apsim_bound111 < in_count) ) + goto probe; +nomatch: + + out_count++; + ent = c; + if ( free_ent < maxmaxcode ) { + codetabof (i) = free_ent++; /* apsim_unknown codetab */ + htabof (i) = fcode; /* apsim_unknown htab */ + } else if ( ((count_int)in_count >= checkpoint) && (block_compress) ) { + cl_block (); + } + + } + if(bytes_out > in_count) { /* exit(2) if no savings */ + exit_stat = 2; + } + return; +} + + +void cl_block (void) /* table clear for block compress */ +{ + register long int rat; + + checkpoint = in_count + CHECK_GAP; + + if(in_count > 0x007fffff) { /* shift will overflow */ + + rat = bytes_out >> 8; + if(rat == 0) { /* Don't divide by zero */ + rat = 0x7fffffff; + } else { + rat = in_count / rat; + } + } else { + rat = (in_count << 8) / bytes_out; /* 8 fractional bits */ + } + if ( rat > ratio ) { + ratio = rat; + } else { + ratio = 0; + cl_hash ( (count_int) hsize ); + + + free_ent = FIRST; + clear_flg = 1; + output ( (code_int) CLEAR ); + } +} + +void cl_hash(count_int hsize) /* reset code table */ +{ + register count_int *htab_p = htab+hsize; + register long i; + register long m1 = -1; + + i = hsize - 16; + do { /* might use Sys V memset(3) here */ + + *(htab_p-16) = m1; + *(htab_p-15) = m1; + *(htab_p-14) = m1; + *(htab_p-13) = m1; + *(htab_p-12) = m1; + *(htab_p-11) = m1; + *(htab_p-10) = m1; + *(htab_p-9) = m1; + *(htab_p-8) = m1; + *(htab_p-7) = m1; + *(htab_p-6) = m1; + *(htab_p-5) = m1; + *(htab_p-4) = m1; + *(htab_p-3) = m1; + *(htab_p-2) = m1; + *(htab_p-1) = m1; + htab_p -= 16; + } while ((i -= 16) >= 0); + for ( i += 16; i > 0; i-- ) { + *--htab_p = m1; + } +} + + + +unsigned int getbyte(void) +{ + if( InCnt > 0 && (apsim_InCnt-- > 0)) { + InCnt--; + return( (unsigned int)*InBuff++ ); + } else { + return( -1 ); + } +} + +void putbyte( char c ) +{ + *OutBuff++ = c; /* apsim_unknown comp_text_buffer */ +} + + +void writebytes( char *buf, int n ) +{ + int i; + for( i=0; (i= 0 ) { + /* + * byte/bit numbering on the VAX is simulated by the following code + */ + /* + * Get to the first byte. + */ + bp += (r_off >> 3); + r_off &= 7; + /* + * Since code is always >= 8 bits, only need to mask the first + * hunk on the left. + */ + *bp = ((*bp & rmask[r_off]) | (code << r_off)) & lmask[r_off]; /* apsim_unknown buf */ + bp++; + bits -= (8 - r_off); + code >>= 8 - r_off; + /* Get any 8 bit parts in the middle (<=1 for up to 16 bits). */ + if ( bits >= 8 ) { + + *bp++ = code; /* apsim_unknown buf */ + code >>= 8; + bits -= 8; + } + + /* Last bits. 
*/ + if(bits) { + *bp = code; /* apsim_unknown buf */ + } + + offset += n_bits; + if ( offset == (n_bits << 3) ) { + bp = buf; + bits = n_bits; + bytes_out += bits; + do { + putbyte(*bp++); + } while(( --bits) && ((bp - buf < BITS))); + offset = 0; + } + /* + * If the next entry is going to be too big for the code size, + * then increase it, if possible. + */ + if ( free_ent > maxcode || ((clear_flg > 0))) { + /* + * Write the whole buffer, because the input side won't + * discover the size increase until after it has read it. + */ + if ( offset > 0 ) { + writebytes( buf, n_bits ); + bytes_out += n_bits; + } + offset = 0; + if ( clear_flg ) { + maxcode = MAXCODE (n_bits = INIT_BITS); + clear_flg = 0; + } else { + n_bits++; + if ( n_bits == maxbits ) + { + maxcode = maxmaxcode; + } + else + { + maxcode = MAXCODE(n_bits); + } + } + } + } else { + /* + * At EOF, write the rest of the buffer. + */ + if ( offset > 0 ) + { + writebytes( buf, ((offset + 7) / 8) ); + } + bytes_out += (offset + 7) / 8; + offset = 0; + } +} diff --git a/test/src/cover.c b/test/src/cover.c new file mode 100755 index 0000000..2ace3d5 --- /dev/null +++ b/test/src/cover.c @@ -0,0 +1,238 @@ +int swi120(int c) +{ + int i; + for (i=0; i<120; i++) { + switch (i) { + case 0: c++; break; + case 1: c++; break; + case 2: c++; break; + case 3: c++; break; + case 4: c++; break; + case 5: c++; break; + case 6: c++; break; + case 7: c++; break; + case 8: c++; break; + case 9: c++; break; + case 10: c++; break; + case 11: c++; break; + case 12: c++; break; + case 13: c++; break; + case 14: c++; break; + case 15: c++; break; + case 16: c++; break; + case 17: c++; break; + case 18: c++; break; + case 19: c++; break; + case 20: c++; break; + case 21: c++; break; + case 22: c++; break; + case 23: c++; break; + case 24: c++; break; + case 25: c++; break; + case 26: c++; break; + case 27: c++; break; + case 28: c++; break; + case 29: c++; break; + case 30: c++; break; + case 31: c++; break; + case 32: c++; break; + case 33: c++; break; + case 34: c++; break; + case 35: c++; break; + case 36: c++; break; + case 37: c++; break; + case 38: c++; break; + case 39: c++; break; + case 40: c++; break; + case 41: c++; break; + case 42: c++; break; + case 43: c++; break; + case 44: c++; break; + case 45: c++; break; + case 46: c++; break; + case 47: c++; break; + case 48: c++; break; + case 49: c++; break; + case 50: c++; break; + case 51: c++; break; + case 52: c++; break; + case 53: c++; break; + case 54: c++; break; + case 55: c++; break; + case 56: c++; break; + case 57: c++; break; + case 58: c++; break; + case 59: c++; break; + case 60: c++; break; + case 61: c++; break; + case 62: c++; break; + case 63: c++; break; + case 64: c++; break; + case 65: c++; break; + case 66: c++; break; + case 67: c++; break; + case 68: c++; break; + case 69: c++; break; + case 70: c++; break; + case 71: c++; break; + case 72: c++; break; + case 73: c++; break; + case 74: c++; break; + case 75: c++; break; + case 76: c++; break; + case 77: c++; break; + case 78: c++; break; + case 79: c++; break; + case 80: c++; break; + case 81: c++; break; + case 82: c++; break; + case 83: c++; break; + case 84: c++; break; + case 85: c++; break; + case 86: c++; break; + case 87: c++; break; + case 88: c++; break; + case 89: c++; break; + case 90: c++; break; + case 91: c++; break; + case 92: c++; break; + case 93: c++; break; + case 94: c++; break; + case 95: c++; break; + case 96: c++; break; + case 97: c++; break; + case 98: c++; break; + case 99: c++; break; + case 100: 
c++; break; + case 101: c++; break; + case 102: c++; break; + case 103: c++; break; + case 104: c++; break; + case 105: c++; break; + case 106: c++; break; + case 107: c++; break; + case 108: c++; break; + case 109: c++; break; + case 110: c++; break; + case 111: c++; break; + case 112: c++; break; + case 113: c++; break; + case 114: c++; break; + case 115: c++; break; + case 116: c++; break; + case 117: c++; break; + case 118: c++; break; + case 119: c++; break; + default: c--; break; + } + } + return c; +} + + +int swi50(int c) +{ + int i; + for (i=0; i<50; i++) { + switch (i) { + case 0: c++; break; + case 1: c++; break; + case 2: c++; break; + case 3: c++; break; + case 4: c++; break; + case 5: c++; break; + case 6: c++; break; + case 7: c++; break; + case 8: c++; break; + case 9: c++; break; + case 10: c++; break; + case 11: c++; break; + case 12: c++; break; + case 13: c++; break; + case 14: c++; break; + case 15: c++; break; + case 16: c++; break; + case 17: c++; break; + case 18: c++; break; + case 19: c++; break; + case 20: c++; break; + case 21: c++; break; + case 22: c++; break; + case 23: c++; break; + case 24: c++; break; + case 25: c++; break; + case 26: c++; break; + case 27: c++; break; + case 28: c++; break; + case 29: c++; break; + case 30: c++; break; + case 31: c++; break; + case 32: c++; break; + case 33: c++; break; + case 34: c++; break; + case 35: c++; break; + case 36: c++; break; + case 37: c++; break; + case 38: c++; break; + case 39: c++; break; + case 40: c++; break; + case 41: c++; break; + case 42: c++; break; + case 43: c++; break; + case 44: c++; break; + case 45: c++; break; + case 46: c++; break; + case 47: c++; break; + case 48: c++; break; + case 49: c++; break; + case 50: c++; break; + case 51: c++; break; + case 52: c++; break; + case 53: c++; break; + case 54: c++; break; + case 55: c++; break; + case 56: c++; break; + case 57: c++; break; + case 58: c++; break; + case 59: c++; break; + default: c--; break; + } + } + return c; +} + + +int swi10(int c) +{ + int i; + for (i=0; i<10; i++) { + switch (i) { + case 0: c++; break; + case 1: c++; break; + case 2: c++; break; + case 3: c++; break; + case 4: c++; break; + case 5: c++; break; + case 6: c++; break; + case 7: c++; break; + case 8: c++; break; + case 9: c++; break; + default: c--; break; + } + } + return c; +} + +int main() +{ + volatile int cnt=0; + + cnt=swi10(cnt); + cnt=swi50(cnt); + cnt=swi120(cnt); + + /* printf("cnt: %d\n", cnt); */ + + return cnt; + +} diff --git a/test/src/crc.c b/test/src/crc.c new file mode 100755 index 0000000..f3106b8 --- /dev/null +++ b/test/src/crc.c @@ -0,0 +1,127 @@ +/* $Id: crc.c,v 1.2 2005/04/04 11:34:58 csg Exp $ */ + +/*************************************************************************/ +/* */ +/* SNU-RT Benchmark Suite for Worst Case Timing Analysis */ +/* ===================================================== */ +/* Collected and Modified by S.-S. Lim */ +/* sslim@archi.snu.ac.kr */ +/* Real-Time Research Group */ +/* Seoul National University */ +/* */ +/* */ +/* < Features > - restrictions for our experimental environment */ +/* */ +/* 1. Completely structured. */ +/* - There are no unconditional jumps. */ +/* - There are no exit from loop bodies. */ +/* (There are no 'break' or 'return' in loop bodies) */ +/* 2. No 'switch' statements. */ +/* 3. No 'do..while' statements. */ +/* 4. Expressions are restricted. */ +/* - There are no multiple expressions joined by 'or', */ +/* 'and' operations. */ +/* 5. No library calls. 
*/ +/* - All the functions needed are implemented in the */ +/* source file. */ +/* */ +/* */ +/*************************************************************************/ +/* */ +/* FILE: crc.c */ +/* SOURCE : Numerical Recipes in C - The Second Edition */ +/* */ +/* DESCRIPTION : */ +/* */ +/* A demonstration for CRC (Cyclic Redundancy Check) operation. */ +/* The CRC is manipulated as two functions, icrc1 and icrc. */ +/* icrc1 is for one character and icrc uses icrc1 for a string. */ +/* The input string is stored in array lin[]. */ +/* icrc is called two times, one for X-Modem string CRC and the */ +/* other for X-Modem packet CRC. */ +/* */ +/* REMARK : */ +/* */ +/* EXECUTION TIME : */ +/* */ +/* */ +/*************************************************************************/ + + +typedef unsigned char uchar; +#define LOBYTE(x) ((uchar)((x) & 0xFF)) +#define HIBYTE(x) ((uchar)((x) >> 8)) + +unsigned char lin[256] = "asdffeagewaHAFEFaeDsFEawFdsFaefaeerdjgp"; + +unsigned short icrc1(unsigned short crc, unsigned char onech) +{ + int i; + unsigned short ans=(crc^onech << 8); + + for (i=0;i<8;i++) { + if (ans & 0x8000) + ans = (ans <<= 1) ^ 4129; + else + ans <<= 1; + } + return ans; +} + +unsigned short icrc(unsigned short crc, unsigned long len, + short jinit, int jrev) +{ + unsigned short icrc1(unsigned short crc, unsigned char onech); + static unsigned short icrctb[256],init=0; + static uchar rchr[256]; + unsigned short tmp1, tmp2, j,cword=crc; + static uchar it[16]={0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15}; + + if (!init) { + init=1; + for (j=0;j<=255;j++) { + icrctb[j]=icrc1(j << 8,(uchar)0); + rchr[j]=(uchar)(it[j & 0xF] << 4 | it[j >> 4]); + } + } + if (jinit >= 0) cword=((uchar) jinit) | (((uchar) jinit) << 8); + else if (jrev < 0) + cword=rchr[HIBYTE(cword)] | rchr[LOBYTE(cword)] << 8; +#ifdef DEBUG + printf("len = %d\n", len); +#endif + for (j=1;j<=len;j++) { + if (jrev < 0) { + tmp1 = rchr[lin[j]]^ HIBYTE(cword); + } + else { + tmp1 = lin[j]^ HIBYTE(cword); + } + cword = icrctb[tmp1] ^ LOBYTE(cword) << 8; + } + if (jrev >= 0) { + tmp2 = cword; + } + else { + tmp2 = rchr[HIBYTE(cword)] | rchr[LOBYTE(cword)] << 8; + } + return (tmp2 ); +} + + +int main(void) +{ + + unsigned short i1,i2; + unsigned long n; + + n=40; + lin[n+1]=0; + i1=icrc(0,n,(short)0,1); + lin[n+1]=HIBYTE(i1); + lin[n+2]=LOBYTE(i1); + i2=icrc(i1,n+2,(short)0,1); + return 0; +} + + diff --git a/test/src/dijkstra.c b/test/src/dijkstra.c new file mode 100644 index 0000000..c3c0fd8 --- /dev/null +++ b/test/src/dijkstra.c @@ -0,0 +1,777 @@ +#include +#include + +#define NUM_NODES 100 +#define NONE 9999 + +// START MALLOC + + + +#define SIMPLE_ALLOC_SIZE 16384 + + + +void *s_malloc(unsigned long size); + +void s_free(void *mem); + +volatile unsigned int sanity = 512; + +char alloc_pool[SIMPLE_ALLOC_SIZE]; +char *alloc_ptr = alloc_pool; + +void *s_malloc(unsigned long size) { + char *copy = alloc_ptr; + alloc_ptr += size; + if (alloc_ptr >= alloc_pool + SIMPLE_ALLOC_SIZE) { + // Wraparound alloc + alloc_ptr = alloc_pool + size; + return alloc_pool; + } + return copy; +} + +void s_free(void *mem) {} + +// END MALLOC + +// START DATA + + + +unsigned long dijkstra_input_data[100][100] = { + {32, 32, 54, 12, 52, 56, 8, 30, 44, 94, 44, 39, 65, 19, 51, 91, 1, + 5, 89, 34, 25, 58, 20, 51, 38, 65, 30, 7, 20, 10, 51, 18, 43, 71, + 97, 61, 26, 5, 57, 70, 65, 0, 75, 29, 86, 93, 87, 87, 64, 75, 88, + 89, 100, 7, 40, 37, 38, 36, 44, 24, 46, 95, 43, 89, 32, 5, 15, 58, + 77, 72, 95, 8, 38, 69, 37, 24, 27, 90, 77, 92, 31, 30, 80, 30, 
37, + 86, 33, 76, 21, 77, 100, 68, 37, 8, 22, 69, 81, 38, 94, 57}, + {76, 54, 65, 14, 89, 69, 4, 16, 24, 47, 7, 21, 78, 53, 17, 81, 39, + 50, 22, 60, 93, 89, 94, 30, 97, 16, 65, 43, 20, 24, 67, 62, 78, 98, + 42, 67, 32, 46, 49, 57, 60, 56, 44, 37, 75, 62, 17, 13, 11, 40, 40, + 4, 95, 100, 0, 57, 82, 31, 0, 1, 56, 67, 30, 100, 64, 72, 66, 63, + 18, 81, 19, 44, 2, 63, 81, 78, 91, 64, 91, 2, 70, 97, 73, 64, 97, + 39, 21, 78, 70, 21, 46, 25, 54, 76, 92, 84, 47, 57, 46, 31}, + {38, 31, 75, 40, 61, 21, 84, 51, 86, 41, 19, 21, 37, 58, 86, 100, 97, + 73, 44, 67, 60, 90, 58, 13, 31, 49, 63, 44, 73, 76, 76, 77, 73, 16, + 83, 100, 4, 67, 51, 56, 7, 36, 77, 10, 95, 28, 10, 57, 0, 54, 23, + 60, 9, 48, 39, 40, 97, 69, 84, 35, 44, 25, 11, 83, 8, 61, 83, 12, + 27, 100, 34, 0, 35, 10, 10, 96, 39, 87, 53, 5, 40, 42, 66, 15, 90, + 71, 55, 87, 39, 5, 88, 49, 97, 100, 32, 4, 60, 81, 83, 53}, + {80, 16, 53, 14, 94, 29, 77, 99, 16, 29, 3, 22, 71, 35, 4, 61, 6, + 25, 13, 11, 30, 0, 27, 94, 66, 25, 64, 92, 5, 47, 44, 85, 29, 63, + 65, 89, 59, 41, 87, 41, 36, 57, 29, 7, 92, 33, 34, 64, 59, 47, 76, + 55, 13, 2, 48, 46, 27, 12, 37, 99, 25, 48, 83, 20, 77, 13, 9, 35, + 55, 62, 76, 57, 18, 72, 64, 10, 4, 64, 74, 63, 77, 15, 18, 91, 84, + 32, 36, 77, 10, 39, 75, 35, 87, 23, 22, 30, 37, 31, 65, 58}, + {59, 7, 14, 78, 79, 45, 54, 83, 8, 94, 12, 86, 9, 97, 42, 93, 95, + 44, 70, 5, 83, 10, 40, 36, 34, 62, 66, 71, 59, 97, 95, 18, 3, 8, + 62, 48, 19, 15, 98, 28, 8, 9, 80, 84, 72, 21, 43, 66, 65, 79, 71, + 13, 89, 78, 49, 22, 5, 14, 59, 65, 11, 53, 49, 81, 28, 77, 29, 47, + 92, 26, 41, 66, 1, 20, 50, 73, 7, 59, 4, 72, 37, 76, 86, 25, 19, + 0, 14, 24, 15, 73, 55, 93, 93, 3, 73, 87, 80, 68, 100, 37}, + {94, 41, 3, 61, 27, 19, 33, 35, 78, 38, 73, 14, 80, 58, 5, 99, 59, + 19, 22, 40, 59, 78, 32, 17, 47, 71, 3, 94, 39, 2, 97, 99, 9, 66, + 60, 37, 85, 59, 38, 28, 63, 10, 8, 8, 35, 81, 6, 60, 100, 96, 66, + 24, 39, 64, 41, 52, 34, 10, 11, 39, 80, 8, 4, 89, 74, 64, 92, 25, + 89, 29, 19, 18, 6, 28, 26, 7, 8, 33, 67, 74, 95, 32, 99, 33, 96, + 5, 51, 96, 83, 63, 35, 62, 71, 39, 16, 10, 69, 8, 35, 23}, + {3, 55, 41, 76, 49, 68, 83, 23, 67, 15, 97, 61, 13, 61, 60, 75, 33, + 77, 71, 15, 39, 72, 43, 76, 77, 59, 53, 11, 33, 88, 34, 37, 8, 76, + 79, 23, 9, 62, 46, 76, 43, 9, 2, 57, 70, 28, 31, 69, 4, 68, 84, + 10, 39, 26, 52, 82, 52, 4, 93, 85, 59, 94, 21, 33, 35, 67, 57, 44, + 28, 69, 86, 37, 78, 54, 94, 14, 48, 25, 83, 18, 59, 33, 28, 99, 25, + 81, 46, 77, 51, 39, 62, 9, 32, 49, 43, 33, 15, 100, 77, 9}, + {68, 28, 47, 12, 82, 6, 26, 96, 98, 75, 13, 57, 7, 8, 55, 33, 55, + 0, 76, 5, 5, 3, 15, 3, 53, 58, 36, 34, 23, 79, 10, 57, 6, 23, + 69, 54, 29, 61, 49, 27, 36, 63, 84, 9, 71, 4, 8, 25, 71, 85, 97, + 77, 88, 11, 46, 6, 35, 83, 7, 24, 27, 17, 82, 34, 40, 16, 88, 69, + 44, 3, 62, 46, 32, 45, 55, 2, 49, 64, 94, 87, 14, 90, 63, 68, 68, + 75, 75, 2, 23, 82, 27, 51, 65, 75, 85, 71, 57, 38, 39, 0}, + {7, 1, 46, 39, 12, 68, 41, 28, 31, 0, 14, 45, 91, 43, 12, 58, 17, + 53, 26, 41, 0, 19, 92, 31, 60, 42, 1, 17, 46, 41, 84, 54, 8, 97, + 93, 20, 64, 0, 14, 61, 0, 28, 72, 57, 71, 50, 81, 89, 70, 7, 96, + 70, 26, 87, 1, 87, 95, 69, 70, 40, 9, 19, 94, 84, 15, 87, 71, 45, + 87, 85, 5, 53, 13, 43, 10, 50, 94, 91, 38, 63, 98, 33, 99, 91, 86, + 66, 43, 80, 35, 79, 20, 10, 98, 80, 61, 13, 66, 31, 24, 18}, + {82, 97, 72, 61, 39, 48, 11, 99, 38, 49, 27, 2, 49, 26, 59, 0, 58, + 1, 81, 59, 80, 67, 70, 77, 46, 97, 56, 79, 27, 81, 63, 75, 77, 0, + 36, 82, 48, 47, 81, 53, 62, 7, 55, 77, 100, 13, 78, 24, 81, 24, 83, + 26, 91, 18, 2, 2, 14, 25, 47, 7, 72, 10, 83, 14, 
10, 18, 96, 25, + 65, 42, 78, 93, 16, 32, 70, 15, 11, 47, 5, 58, 71, 89, 84, 27, 73, + 86, 96, 88, 77, 43, 95, 48, 19, 43, 62, 96, 61, 24, 20, 92}, + {66, 98, 85, 82, 96, 20, 64, 73, 67, 69, 30, 3, 23, 13, 97, 97, 66, + 58, 50, 42, 0, 44, 57, 86, 54, 85, 82, 14, 8, 1, 73, 41, 66, 23, + 22, 61, 43, 86, 0, 9, 21, 30, 79, 44, 44, 75, 40, 76, 99, 56, 17, + 100, 67, 40, 51, 20, 25, 32, 0, 100, 0, 73, 40, 66, 96, 29, 93, 38, + 81, 93, 13, 1, 90, 92, 46, 100, 32, 52, 75, 31, 8, 58, 97, 75, 99, + 13, 61, 90, 46, 61, 89, 12, 34, 96, 78, 96, 24, 36, 34, 4}, + {96, 13, 73, 85, 72, 18, 50, 70, 36, 24, 67, 10, 82, 29, 51, 80, 43, + 11, 35, 89, 39, 24, 0, 73, 86, 44, 34, 9, 46, 34, 80, 41, 48, 52, + 92, 19, 36, 41, 55, 39, 31, 22, 49, 13, 51, 67, 59, 94, 44, 95, 48, + 83, 85, 48, 21, 70, 58, 56, 45, 4, 90, 91, 11, 3, 43, 70, 89, 45, + 77, 44, 84, 8, 66, 100, 88, 83, 66, 46, 77, 76, 6, 24, 59, 91, 39, + 46, 26, 97, 68, 37, 0, 58, 28, 79, 27, 37, 48, 16, 82, 24}, + {60, 66, 32, 92, 65, 19, 74, 97, 32, 16, 72, 38, 41, 97, 96, 46, 43, + 88, 42, 77, 25, 9, 34, 19, 88, 28, 56, 1, 44, 3, 25, 70, 69, 24, + 27, 100, 9, 0, 96, 7, 84, 34, 12, 91, 30, 7, 36, 39, 95, 78, 16, + 86, 53, 16, 71, 6, 44, 26, 7, 54, 30, 100, 23, 65, 23, 50, 65, 99, + 17, 26, 73, 67, 60, 85, 57, 57, 92, 93, 96, 52, 36, 78, 4, 90, 61, + 75, 96, 4, 68, 3, 25, 64, 69, 14, 28, 58, 31, 59, 56, 48}, + {86, 28, 81, 45, 12, 37, 1, 70, 29, 64, 89, 31, 41, 93, 20, 1, 67, + 83, 73, 0, 52, 98, 64, 20, 78, 93, 78, 8, 17, 100, 22, 2, 95, 2, + 48, 6, 39, 15, 43, 34, 79, 31, 66, 87, 23, 52, 54, 56, 34, 93, 57, + 52, 56, 87, 72, 34, 79, 15, 42, 63, 15, 65, 65, 9, 67, 79, 82, 73, + 95, 91, 6, 39, 21, 38, 92, 10, 91, 46, 67, 91, 38, 90, 43, 95, 76, + 81, 28, 21, 63, 70, 84, 78, 0, 48, 53, 68, 94, 0, 40, 88}, + {92, 12, 93, 12, 17, 85, 23, 7, 30, 56, 64, 34, 45, 73, 28, 87, 20, + 22, 7, 83, 59, 91, 26, 59, 5, 79, 26, 99, 79, 32, 52, 70, 11, 44, + 83, 28, 95, 72, 1, 91, 27, 65, 25, 38, 4, 19, 24, 24, 8, 99, 73, + 67, 89, 99, 25, 60, 77, 18, 24, 21, 16, 42, 58, 27, 53, 6, 55, 47, + 78, 56, 38, 71, 88, 29, 8, 58, 48, 99, 48, 56, 97, 20, 89, 52, 18, + 14, 78, 61, 99, 2, 48, 14, 44, 5, 42, 97, 11, 63, 10, 55}, + {19, 48, 25, 73, 77, 100, 30, 91, 99, 78, 13, 95, 98, 1, 12, 82, 82, + 91, 8, 80, 93, 22, 61, 2, 28, 2, 66, 5, 65, 76, 61, 50, 90, 86, + 22, 32, 52, 52, 22, 50, 96, 1, 10, 59, 70, 90, 40, 51, 80, 14, 98, + 38, 37, 58, 40, 31, 60, 72, 2, 91, 47, 63, 7, 2, 15, 29, 34, 67, + 48, 23, 83, 9, 24, 59, 69, 94, 48, 8, 11, 27, 90, 8, 31, 93, 32, + 38, 90, 58, 9, 92, 48, 23, 55, 55, 25, 36, 51, 60, 69, 65}, + {83, 51, 74, 73, 76, 42, 67, 24, 17, 44, 17, 73, 18, 49, 65, 50, 87, + 54, 7, 62, 11, 21, 85, 32, 77, 10, 68, 94, 70, 36, 24, 52, 53, 98, + 24, 96, 6, 57, 86, 90, 67, 2, 62, 85, 17, 26, 34, 70, 46, 41, 32, + 23, 63, 16, 56, 5, 26, 23, 65, 62, 26, 89, 80, 45, 52, 71, 6, 58, + 27, 92, 47, 61, 61, 75, 45, 78, 67, 46, 14, 12, 53, 46, 36, 82, 28, + 58, 87, 21, 47, 17, 83, 73, 72, 63, 85, 24, 33, 91, 48, 26}, + {49, 62, 53, 9, 36, 99, 53, 3, 10, 67, 82, 63, 79, 84, 45, 7, 41, + 98, 95, 89, 82, 43, 27, 53, 5, 78, 77, 4, 69, 25, 98, 17, 53, 16, + 93, 89, 81, 45, 58, 91, 12, 40, 54, 91, 90, 65, 64, 31, 62, 58, 86, + 43, 1, 12, 63, 73, 91, 39, 44, 25, 30, 7, 8, 83, 23, 0, 38, 4, + 45, 96, 61, 23, 1, 14, 81, 92, 45, 44, 89, 74, 69, 74, 83, 36, 52, + 45, 75, 8, 85, 18, 100, 81, 92, 7, 30, 82, 74, 34, 52, 86}, + {96, 12, 8, 98, 94, 89, 55, 38, 100, 43, 11, 68, 83, 95, 3, 0, 39, + 78, 9, 90, 63, 8, 37, 20, 83, 67, 1, 56, 67, 53, 7, 62, 66, 16, + 25, 25, 71, 80, 63, 70, 89, 
75, 3, 37, 35, 6, 38, 74, 51, 47, 30, + 80, 21, 67, 100, 3, 100, 68, 26, 66, 87, 33, 27, 52, 15, 53, 43, 53, + 99, 6, 22, 88, 47, 26, 24, 82, 99, 28, 21, 15, 75, 51, 95, 63, 84, + 61, 66, 83, 28, 58, 14, 14, 58, 42, 33, 39, 61, 76, 92, 25}, + {48, 14, 79, 95, 6, 70, 76, 4, 98, 98, 87, 39, 14, 81, 1, 99, 7, + 33, 81, 1, 92, 96, 16, 15, 3, 15, 54, 30, 57, 12, 55, 5, 93, 0, + 100, 99, 70, 42, 69, 67, 39, 21, 5, 53, 2, 6, 51, 76, 40, 99, 78, + 98, 60, 60, 79, 63, 75, 99, 59, 98, 10, 80, 2, 2, 80, 69, 67, 49, + 10, 2, 16, 49, 23, 88, 68, 92, 95, 86, 68, 0, 84, 11, 64, 43, 71, + 42, 72, 45, 40, 97, 42, 17, 76, 11, 86, 56, 80, 19, 4, 90}, + {88, 87, 4, 77, 75, 72, 69, 35, 23, 2, 35, 6, 80, 99, 15, 50, 6, + 53, 61, 46, 49, 69, 29, 25, 80, 15, 47, 25, 34, 51, 14, 21, 38, 85, + 98, 79, 57, 32, 13, 46, 0, 48, 53, 80, 12, 34, 29, 18, 54, 56, 30, + 2, 25, 60, 94, 4, 41, 40, 30, 75, 58, 10, 62, 62, 96, 59, 40, 18, + 58, 53, 64, 24, 67, 83, 4, 79, 17, 100, 63, 37, 56, 93, 39, 81, 18, + 100, 51, 59, 5, 81, 100, 63, 58, 61, 24, 53, 87, 64, 37, 10}, + {83, 67, 34, 49, 50, 38, 27, 33, 4, 56, 70, 60, 15, 75, 6, 33, 40, + 57, 59, 46, 4, 24, 75, 62, 86, 100, 81, 38, 29, 17, 48, 79, 84, 48, + 27, 100, 87, 21, 32, 57, 77, 68, 16, 92, 9, 22, 92, 49, 79, 16, 95, + 83, 40, 70, 10, 25, 35, 91, 29, 30, 74, 43, 8, 24, 92, 2, 23, 44, + 23, 22, 0, 66, 56, 16, 58, 65, 4, 15, 14, 49, 31, 75, 32, 71, 10, + 8, 63, 45, 100, 92, 42, 73, 1, 50, 97, 93, 18, 87, 36, 41}, + {75, 36, 7, 30, 18, 31, 96, 22, 12, 76, 71, 43, 50, 69, 80, 61, 78, + 42, 72, 43, 0, 13, 15, 68, 30, 79, 60, 48, 31, 62, 56, 5, 98, 29, + 1, 82, 26, 97, 3, 38, 72, 40, 81, 89, 76, 26, 15, 53, 35, 87, 96, + 1, 67, 77, 69, 97, 21, 28, 10, 18, 90, 32, 23, 53, 61, 25, 34, 87, + 88, 3, 91, 26, 9, 37, 81, 85, 64, 96, 3, 99, 82, 65, 100, 48, 42, + 68, 10, 29, 62, 88, 48, 17, 19, 37, 70, 47, 28, 70, 100, 16}, + {73, 91, 8, 82, 94, 89, 33, 57, 84, 36, 21, 31, 1, 87, 46, 9, 20, + 56, 4, 82, 9, 52, 99, 96, 56, 34, 8, 84, 3, 7, 66, 42, 64, 74, + 24, 58, 28, 23, 81, 11, 59, 2, 9, 26, 55, 55, 1, 76, 77, 6, 23, + 87, 24, 89, 82, 80, 22, 90, 30, 93, 63, 96, 34, 27, 36, 24, 51, 30, + 47, 98, 8, 73, 100, 17, 99, 21, 72, 0, 97, 48, 73, 86, 34, 97, 74, + 82, 43, 63, 37, 73, 55, 0, 34, 55, 94, 36, 80, 10, 67, 93}, + {7, 75, 65, 74, 92, 64, 95, 63, 30, 57, 77, 2, 42, 11, 65, 16, 59, + 7, 45, 97, 46, 66, 63, 81, 20, 56, 83, 66, 32, 49, 59, 39, 90, 23, + 12, 81, 53, 73, 9, 49, 29, 87, 17, 72, 64, 83, 54, 89, 90, 65, 85, + 36, 30, 13, 83, 16, 35, 65, 83, 67, 14, 7, 73, 70, 97, 85, 51, 16, + 24, 26, 65, 53, 79, 83, 91, 8, 65, 10, 98, 20, 41, 48, 22, 71, 62, + 4, 54, 63, 36, 36, 30, 16, 9, 2, 86, 5, 53, 36, 88, 77}, + {29, 53, 97, 74, 1, 53, 83, 32, 30, 46, 52, 71, 94, 41, 42, 21, 45, + 62, 85, 81, 98, 81, 97, 73, 83, 83, 44, 1, 85, 32, 45, 80, 85, 41, + 54, 52, 60, 2, 84, 90, 48, 1, 61, 7, 42, 69, 96, 54, 30, 46, 0, + 94, 26, 64, 32, 75, 46, 76, 42, 97, 7, 87, 43, 58, 94, 97, 9, 54, + 99, 59, 43, 12, 61, 70, 19, 69, 4, 14, 22, 0, 26, 23, 60, 52, 53, + 92, 93, 65, 68, 35, 61, 75, 88, 70, 33, 82, 66, 8, 35, 30}, + {68, 44, 8, 95, 81, 28, 63, 85, 8, 52, 86, 35, 41, 11, 53, 94, 3, + 12, 58, 71, 13, 85, 11, 0, 55, 44, 82, 87, 19, 83, 84, 87, 27, 92, + 81, 7, 86, 9, 58, 61, 27, 9, 62, 68, 21, 81, 61, 24, 93, 85, 61, + 72, 70, 72, 73, 91, 16, 20, 77, 35, 3, 26, 88, 97, 18, 34, 3, 70, + 9, 27, 30, 37, 37, 92, 4, 24, 73, 32, 48, 31, 83, 8, 3, 52, 80, + 42, 8, 62, 62, 52, 63, 65, 78, 16, 27, 62, 50, 30, 32, 26}, + {24, 62, 63, 27, 20, 67, 51, 59, 65, 65, 90, 48, 73, 93, 66, 18, 0, + 75, 47, 63, 
26, 76, 94, 3, 59, 21, 66, 75, 17, 64, 0, 41, 25, 63, + 68, 11, 97, 85, 70, 61, 49, 60, 8, 88, 18, 41, 6, 19, 15, 19, 48, + 41, 61, 41, 10, 19, 62, 42, 95, 46, 5, 95, 53, 98, 58, 21, 8, 20, + 5, 79, 81, 21, 4, 56, 8, 89, 97, 81, 74, 11, 100, 21, 18, 61, 29, + 95, 46, 57, 37, 40, 2, 42, 1, 56, 5, 59, 43, 14, 79, 14}, + {59, 25, 35, 29, 81, 44, 84, 43, 24, 58, 20, 91, 45, 38, 17, 74, 100, + 63, 31, 36, 3, 33, 44, 71, 55, 50, 96, 98, 30, 40, 12, 55, 65, 13, + 50, 12, 57, 33, 55, 48, 91, 42, 38, 36, 46, 55, 76, 45, 17, 6, 81, + 87, 6, 25, 57, 61, 41, 52, 25, 37, 92, 3, 92, 23, 16, 7, 35, 74, + 40, 56, 21, 98, 98, 59, 100, 44, 80, 75, 89, 97, 82, 36, 50, 54, 27, + 6, 14, 68, 25, 5, 4, 83, 8, 62, 5, 25, 69, 40, 65, 75}, + {63, 52, 72, 60, 10, 71, 70, 56, 12, 59, 52, 94, 95, 68, 13, 21, 41, + 94, 55, 66, 100, 25, 48, 7, 53, 54, 99, 88, 60, 63, 62, 22, 14, 34, + 49, 91, 71, 18, 46, 83, 77, 65, 42, 37, 32, 55, 24, 39, 15, 45, 4, + 14, 36, 19, 21, 89, 39, 87, 76, 99, 49, 4, 88, 64, 4, 36, 54, 75, + 20, 67, 24, 64, 31, 32, 0, 29, 54, 92, 69, 69, 36, 39, 83, 39, 58, + 70, 27, 63, 56, 70, 28, 5, 74, 15, 35, 78, 17, 55, 18, 37}, + {88, 8, 0, 85, 41, 68, 14, 95, 59, 49, 63, 61, 54, 11, 66, 79, 81, + 94, 41, 3, 29, 69, 75, 69, 50, 9, 46, 33, 30, 30, 71, 18, 39, 37, + 2, 80, 4, 83, 40, 29, 98, 2, 57, 52, 13, 22, 30, 60, 82, 71, 29, + 10, 6, 3, 79, 22, 79, 91, 56, 76, 21, 26, 94, 26, 63, 62, 72, 34, + 45, 11, 29, 42, 13, 86, 94, 93, 75, 90, 18, 56, 27, 48, 33, 33, 17, + 78, 55, 63, 69, 10, 38, 56, 2, 31, 48, 32, 93, 19, 32, 3}, + {30, 61, 46, 43, 13, 5, 1, 88, 96, 86, 9, 89, 100, 42, 21, 17, 20, + 42, 80, 55, 19, 17, 10, 88, 14, 58, 19, 6, 77, 17, 77, 73, 79, 22, + 15, 58, 94, 83, 45, 55, 68, 20, 43, 68, 63, 30, 51, 49, 39, 97, 3, + 58, 13, 80, 45, 27, 3, 31, 100, 80, 48, 76, 52, 93, 64, 33, 50, 24, + 82, 61, 45, 15, 82, 89, 49, 10, 85, 100, 59, 23, 96, 28, 81, 75, 7, + 93, 68, 10, 90, 34, 56, 3, 76, 74, 97, 6, 73, 12, 30, 20}, + {40, 75, 35, 88, 29, 85, 64, 14, 50, 22, 37, 12, 16, 85, 87, 23, 77, + 21, 100, 66, 55, 21, 35, 30, 95, 31, 2, 33, 10, 32, 53, 16, 74, 54, + 70, 69, 38, 33, 83, 55, 55, 87, 67, 71, 71, 19, 60, 13, 40, 25, 45, + 61, 46, 80, 58, 6, 78, 60, 39, 88, 93, 58, 70, 32, 11, 39, 0, 16, + 72, 50, 71, 93, 36, 37, 29, 6, 56, 55, 19, 63, 80, 64, 23, 25, 43, + 81, 98, 87, 41, 2, 40, 100, 60, 9, 31, 37, 14, 98, 53, 86}, + {47, 90, 44, 83, 26, 73, 55, 49, 27, 40, 11, 73, 70, 0, 64, 13, 82, + 61, 66, 89, 29, 6, 88, 89, 15, 85, 93, 30, 82, 11, 82, 96, 1, 26, + 78, 27, 65, 100, 42, 93, 39, 53, 31, 9, 54, 96, 89, 1, 22, 54, 90, + 52, 60, 43, 6, 42, 27, 99, 72, 75, 10, 19, 70, 11, 45, 14, 4, 10, + 13, 47, 69, 52, 66, 100, 27, 86, 61, 15, 53, 84, 36, 42, 35, 96, 85, + 41, 37, 78, 40, 75, 53, 16, 95, 22, 94, 5, 36, 98, 15, 15}, + {10, 50, 34, 77, 16, 61, 28, 77, 43, 82, 60, 79, 90, 95, 74, 41, 2, + 78, 18, 8, 18, 71, 24, 12, 60, 17, 85, 62, 81, 66, 78, 92, 16, 11, + 34, 32, 38, 28, 75, 81, 9, 1, 59, 66, 62, 100, 6, 64, 43, 24, 72, + 61, 62, 62, 40, 21, 79, 24, 49, 26, 90, 26, 84, 72, 3, 84, 70, 8, + 11, 45, 89, 88, 46, 14, 53, 74, 80, 59, 38, 89, 83, 9, 15, 10, 38, + 55, 31, 83, 45, 81, 8, 1, 73, 92, 73, 43, 75, 9, 51, 53}, + {54, 5, 40, 66, 86, 59, 39, 31, 17, 43, 19, 66, 19, 1, 77, 57, 22, + 74, 39, 68, 20, 14, 35, 60, 5, 7, 2, 47, 16, 19, 66, 36, 91, 5, + 68, 43, 30, 74, 40, 47, 83, 26, 79, 1, 27, 21, 24, 49, 96, 64, 83, + 82, 78, 17, 41, 49, 92, 9, 62, 74, 28, 27, 77, 86, 99, 44, 95, 28, + 84, 34, 41, 33, 60, 20, 34, 87, 41, 59, 36, 2, 89, 85, 85, 32, 2, + 25, 47, 94, 35, 9, 67, 29, 2, 43, 81, 1, 54, 
75, 96, 3}, + {9, 37, 36, 35, 23, 37, 22, 30, 62, 24, 33, 50, 8, 84, 48, 77, 8, + 95, 70, 9, 70, 37, 5, 73, 46, 86, 74, 100, 27, 35, 70, 2, 72, 5, + 37, 95, 42, 25, 25, 3, 49, 24, 19, 24, 7, 67, 0, 82, 28, 71, 92, + 98, 74, 63, 70, 86, 14, 9, 52, 41, 45, 21, 43, 83, 93, 47, 44, 35, + 72, 35, 4, 88, 59, 91, 11, 32, 57, 11, 13, 51, 48, 71, 49, 88, 33, + 85, 40, 48, 61, 92, 55, 5, 79, 65, 54, 71, 11, 98, 72, 83}, + {32, 43, 70, 57, 33, 47, 89, 56, 25, 69, 7, 73, 39, 56, 27, 39, 6, + 67, 53, 67, 24, 74, 38, 2, 38, 93, 73, 49, 56, 11, 99, 89, 54, 34, + 11, 87, 48, 67, 42, 73, 35, 49, 11, 40, 71, 4, 45, 78, 71, 98, 10, + 95, 38, 49, 63, 76, 41, 36, 92, 97, 47, 56, 51, 0, 56, 63, 53, 3, + 29, 95, 76, 30, 44, 54, 70, 81, 58, 82, 58, 96, 45, 69, 56, 83, 84, + 19, 59, 24, 21, 16, 87, 34, 72, 4, 0, 27, 33, 53, 31, 28}, + {47, 73, 58, 57, 26, 94, 38, 85, 75, 62, 80, 87, 97, 35, 69, 80, 20, + 27, 3, 41, 43, 57, 75, 81, 27, 75, 8, 60, 27, 5, 88, 41, 78, 11, + 98, 71, 71, 1, 55, 12, 64, 0, 99, 60, 1, 67, 40, 22, 61, 9, 63, + 70, 32, 4, 51, 59, 79, 25, 18, 73, 30, 72, 13, 7, 49, 77, 78, 87, + 79, 99, 99, 42, 65, 63, 68, 67, 96, 7, 55, 56, 84, 84, 93, 15, 88, + 43, 75, 33, 34, 59, 72, 64, 98, 85, 37, 12, 27, 82, 99, 5}, + {80, 63, 13, 11, 92, 48, 44, 88, 55, 99, 9, 4, 48, 1, 20, 2, 10, + 61, 1, 44, 86, 73, 74, 83, 23, 11, 62, 50, 93, 26, 22, 38, 90, 1, + 15, 47, 49, 59, 34, 71, 23, 44, 75, 38, 11, 61, 40, 22, 21, 41, 32, + 7, 13, 6, 56, 36, 84, 17, 52, 76, 44, 74, 80, 100, 42, 96, 46, 91, + 20, 81, 27, 10, 91, 2, 48, 1, 29, 88, 90, 51, 95, 22, 58, 7, 95, + 13, 9, 78, 31, 61, 19, 41, 1, 65, 40, 43, 26, 86, 100, 47}, + {32, 94, 23, 22, 62, 71, 91, 91, 58, 80, 41, 18, 68, 65, 25, 62, 79, + 0, 5, 76, 27, 24, 83, 28, 56, 22, 37, 82, 74, 3, 95, 6, 97, 17, + 95, 24, 54, 85, 14, 78, 31, 56, 96, 99, 20, 87, 27, 65, 87, 32, 6, + 14, 23, 89, 8, 45, 77, 12, 26, 51, 82, 88, 23, 44, 71, 17, 68, 25, + 69, 82, 2, 100, 3, 99, 64, 91, 85, 91, 21, 38, 90, 28, 52, 79, 83, + 26, 23, 60, 38, 49, 10, 86, 2, 33, 29, 74, 16, 97, 65, 51}, + {45, 67, 16, 48, 31, 81, 4, 16, 37, 26, 20, 93, 20, 38, 71, 2, 64, + 94, 62, 69, 9, 72, 54, 11, 71, 84, 51, 54, 80, 15, 4, 24, 83, 88, + 39, 80, 68, 43, 62, 71, 35, 82, 64, 55, 19, 0, 58, 84, 95, 19, 18, + 3, 58, 72, 81, 95, 55, 32, 14, 1, 47, 19, 92, 96, 6, 30, 76, 40, + 40, 37, 77, 75, 19, 6, 30, 38, 7, 54, 88, 68, 73, 5, 71, 97, 78, + 51, 58, 99, 49, 72, 66, 97, 57, 58, 58, 63, 54, 33, 69, 60}, + {37, 12, 1, 56, 18, 31, 60, 92, 51, 14, 59, 90, 19, 29, 87, 63, 47, + 10, 28, 96, 82, 94, 58, 39, 17, 16, 68, 38, 15, 3, 64, 52, 15, 65, + 74, 100, 62, 0, 92, 12, 14, 50, 2, 33, 46, 55, 63, 59, 65, 91, 20, + 46, 50, 79, 51, 34, 61, 19, 72, 76, 89, 35, 95, 3, 67, 68, 69, 28, + 68, 60, 41, 82, 77, 43, 82, 22, 98, 44, 47, 28, 0, 67, 74, 50, 11, + 92, 84, 72, 77, 21, 14, 65, 23, 8, 34, 90, 42, 2, 84, 10}, + {63, 24, 58, 5, 33, 5, 94, 97, 15, 40, 24, 15, 6, 65, 32, 18, 56, + 82, 56, 32, 70, 70, 97, 93, 78, 30, 48, 87, 99, 31, 97, 27, 22, 20, + 32, 55, 93, 25, 52, 7, 31, 42, 90, 4, 6, 88, 89, 62, 35, 44, 60, + 4, 81, 56, 63, 24, 52, 10, 10, 17, 8, 73, 44, 30, 94, 77, 51, 86, + 68, 69, 59, 66, 11, 48, 70, 84, 1, 58, 12, 37, 68, 72, 41, 48, 95, + 71, 73, 12, 47, 83, 29, 55, 56, 74, 51, 15, 16, 2, 67, 50}, + {71, 92, 15, 82, 6, 51, 66, 7, 75, 44, 44, 43, 15, 52, 57, 9, 22, + 96, 89, 35, 79, 17, 91, 0, 57, 7, 82, 73, 9, 14, 90, 81, 5, 4, + 28, 11, 22, 60, 19, 97, 3, 29, 5, 86, 81, 63, 61, 69, 58, 49, 71, + 2, 67, 27, 69, 90, 34, 50, 29, 44, 64, 18, 91, 36, 89, 85, 47, 10, + 45, 32, 7, 14, 62, 12, 100, 8, 41, 
61, 44, 100, 9, 14, 68, 42, 41, + 37, 99, 75, 87, 27, 85, 17, 45, 75, 53, 33, 26, 66, 10, 71}, + {99, 84, 85, 60, 62, 51, 68, 3, 11, 11, 69, 87, 92, 36, 96, 32, 39, + 94, 74, 93, 87, 58, 9, 31, 100, 28, 30, 25, 94, 6, 62, 92, 90, 12, + 17, 52, 29, 86, 55, 40, 63, 90, 94, 21, 92, 55, 53, 31, 14, 93, 23, + 0, 17, 99, 98, 16, 26, 27, 7, 86, 34, 35, 78, 90, 13, 95, 41, 43, + 46, 62, 49, 76, 51, 42, 97, 9, 63, 15, 40, 77, 8, 63, 43, 25, 61, + 40, 7, 53, 68, 81, 38, 68, 82, 82, 57, 95, 43, 65, 37, 55}, + {93, 87, 30, 10, 95, 93, 19, 58, 75, 59, 0, 83, 88, 44, 74, 14, 50, + 47, 67, 17, 94, 71, 51, 75, 53, 75, 69, 96, 5, 73, 16, 98, 59, 13, + 7, 19, 5, 93, 43, 80, 17, 44, 28, 4, 54, 68, 18, 3, 14, 51, 88, + 7, 22, 4, 48, 41, 45, 17, 2, 50, 90, 18, 14, 14, 31, 88, 33, 3, + 81, 77, 49, 98, 87, 44, 2, 6, 11, 87, 76, 93, 4, 63, 66, 26, 34, + 14, 33, 79, 98, 35, 29, 53, 19, 43, 67, 51, 30, 66, 20, 77}, + {8, 69, 75, 61, 79, 43, 33, 91, 96, 9, 49, 100, 38, 14, 25, 72, 28, + 58, 51, 92, 59, 46, 44, 79, 55, 77, 96, 51, 9, 15, 28, 17, 50, 69, + 45, 29, 11, 78, 86, 6, 53, 34, 73, 92, 48, 98, 29, 43, 22, 46, 34, + 47, 92, 79, 25, 12, 55, 87, 64, 64, 68, 58, 48, 18, 93, 59, 13, 70, + 2, 99, 76, 56, 32, 14, 13, 46, 12, 42, 89, 0, 89, 23, 13, 46, 1, + 5, 59, 22, 92, 89, 53, 60, 12, 67, 44, 4, 92, 57, 74, 94}, + {55, 15, 15, 53, 30, 28, 99, 8, 71, 88, 75, 59, 77, 88, 4, 44, 93, + 29, 66, 51, 17, 85, 10, 96, 17, 54, 100, 8, 77, 73, 2, 31, 89, 17, + 50, 85, 46, 48, 93, 83, 35, 67, 7, 11, 54, 78, 21, 13, 7, 88, 64, + 91, 38, 74, 87, 56, 94, 86, 64, 70, 25, 32, 67, 80, 50, 16, 64, 62, + 30, 56, 10, 32, 89, 17, 9, 8, 95, 31, 21, 68, 18, 85, 59, 22, 24, + 11, 78, 84, 97, 42, 19, 88, 40, 86, 67, 90, 68, 30, 17, 99}, + {52, 27, 30, 40, 44, 5, 49, 5, 36, 70, 73, 20, 21, 31, 43, 11, 42, + 20, 96, 5, 28, 14, 93, 69, 67, 26, 24, 34, 56, 8, 99, 75, 35, 95, + 14, 46, 0, 29, 51, 36, 66, 23, 57, 87, 21, 100, 98, 29, 86, 59, 0, + 81, 74, 60, 15, 40, 86, 39, 40, 7, 47, 5, 82, 49, 100, 63, 95, 66, + 92, 11, 2, 57, 0, 25, 9, 21, 91, 74, 17, 76, 32, 17, 22, 72, 43, + 37, 78, 28, 77, 18, 36, 90, 90, 84, 38, 89, 46, 99, 21, 4}, + {9, 90, 27, 10, 14, 3, 98, 4, 77, 14, 46, 75, 99, 35, 47, 41, 72, + 24, 70, 48, 8, 72, 4, 98, 55, 42, 53, 68, 7, 74, 72, 16, 63, 99, + 26, 43, 1, 24, 13, 44, 4, 25, 19, 2, 60, 32, 10, 32, 22, 80, 46, + 98, 17, 50, 95, 38, 59, 13, 5, 66, 87, 77, 48, 15, 42, 41, 58, 9, + 31, 71, 54, 35, 97, 39, 4, 56, 37, 14, 88, 59, 60, 0, 56, 77, 50, + 17, 81, 75, 30, 87, 6, 84, 29, 55, 99, 37, 96, 57, 47, 26}, + {94, 67, 27, 56, 5, 98, 12, 8, 11, 66, 67, 37, 66, 90, 80, 83, 6, + 61, 23, 2, 47, 30, 86, 42, 51, 51, 80, 46, 74, 26, 38, 67, 59, 31, + 23, 64, 29, 1, 38, 6, 33, 4, 44, 100, 60, 90, 48, 32, 50, 71, 1, + 63, 67, 87, 5, 17, 3, 51, 29, 77, 77, 33, 10, 35, 65, 100, 65, 60, + 0, 2, 32, 33, 73, 42, 99, 100, 32, 12, 31, 48, 84, 99, 11, 50, 86, + 83, 34, 55, 33, 63, 32, 76, 97, 8, 77, 27, 7, 7, 53, 74}, + {76, 85, 73, 14, 27, 72, 13, 59, 50, 11, 73, 33, 9, 84, 50, 61, 32, + 84, 16, 31, 12, 14, 6, 8, 89, 49, 1, 96, 56, 54, 35, 31, 39, 7, + 46, 32, 45, 59, 57, 96, 36, 29, 95, 46, 80, 10, 73, 11, 94, 89, 9, + 73, 69, 15, 47, 57, 31, 49, 18, 87, 69, 53, 18, 74, 27, 30, 5, 38, + 55, 28, 33, 92, 58, 95, 3, 37, 4, 76, 14, 65, 31, 23, 37, 66, 5, + 50, 23, 36, 99, 41, 22, 68, 61, 6, 7, 88, 2, 13, 92, 58}, + {41, 92, 15, 65, 86, 18, 1, 56, 60, 83, 87, 57, 5, 90, 23, 10, 40, + 12, 12, 38, 19, 35, 72, 80, 7, 80, 33, 10, 59, 25, 34, 66, 16, 49, + 31, 68, 33, 99, 23, 59, 47, 10, 16, 53, 100, 5, 29, 39, 17, 42, 44, + 2, 43, 82, 49, 16, 
27, 82, 93, 86, 73, 26, 18, 55, 75, 49, 89, 7, + 13, 79, 33, 61, 55, 15, 80, 20, 20, 75, 60, 3, 83, 70, 5, 92, 17, + 54, 8, 45, 2, 0, 30, 41, 27, 14, 63, 68, 29, 51, 42, 43}, + {96, 75, 70, 50, 90, 49, 71, 9, 90, 97, 79, 73, 66, 50, 64, 83, 4, + 72, 27, 73, 39, 24, 80, 32, 4, 42, 100, 34, 60, 41, 43, 55, 82, 12, + 5, 71, 27, 42, 46, 16, 38, 24, 89, 3, 41, 19, 52, 11, 57, 46, 84, + 96, 36, 29, 27, 40, 72, 94, 40, 98, 0, 83, 18, 83, 95, 90, 53, 88, + 31, 66, 71, 69, 56, 59, 38, 97, 44, 57, 7, 1, 2, 57, 97, 4, 87, + 91, 10, 24, 84, 51, 21, 84, 33, 39, 66, 95, 96, 86, 82, 26}, + {51, 52, 96, 73, 78, 33, 70, 21, 90, 77, 89, 58, 0, 86, 28, 87, 42, + 39, 10, 25, 56, 98, 75, 89, 2, 7, 49, 98, 59, 98, 24, 76, 15, 86, + 48, 59, 18, 17, 81, 75, 61, 69, 99, 61, 20, 27, 13, 62, 32, 90, 53, + 88, 87, 95, 42, 89, 1, 58, 53, 60, 55, 43, 1, 70, 28, 49, 29, 12, + 33, 76, 53, 60, 10, 52, 87, 98, 45, 100, 25, 43, 89, 79, 97, 41, 73, + 4, 96, 40, 62, 48, 66, 16, 91, 67, 53, 85, 82, 48, 98, 14}, + {90, 50, 74, 66, 68, 26, 63, 12, 25, 89, 55, 80, 33, 17, 20, 72, 22, + 83, 11, 84, 30, 77, 67, 88, 9, 86, 72, 91, 33, 35, 72, 89, 86, 11, + 54, 53, 38, 17, 32, 29, 72, 53, 76, 71, 71, 62, 42, 93, 44, 19, 76, + 41, 62, 42, 28, 71, 27, 66, 27, 26, 1, 99, 14, 87, 10, 35, 5, 14, + 52, 37, 43, 90, 91, 18, 60, 27, 81, 68, 19, 24, 87, 95, 31, 48, 3, + 59, 18, 97, 92, 11, 90, 93, 10, 70, 45, 20, 4, 16, 34, 22}, + {54, 43, 11, 10, 62, 37, 37, 8, 4, 22, 99, 57, 83, 30, 4, 86, 55, + 89, 49, 46, 0, 38, 38, 77, 74, 49, 97, 79, 66, 97, 0, 86, 5, 79, + 62, 33, 15, 65, 41, 87, 87, 6, 9, 35, 2, 14, 21, 57, 69, 36, 3, + 35, 40, 7, 11, 13, 23, 74, 92, 55, 36, 93, 40, 42, 37, 68, 75, 18, + 32, 83, 71, 85, 89, 81, 19, 91, 61, 6, 13, 29, 8, 16, 65, 48, 91, + 76, 62, 80, 16, 19, 34, 52, 78, 74, 94, 14, 7, 69, 33, 5}, + {17, 3, 56, 5, 84, 41, 62, 44, 48, 75, 40, 56, 58, 71, 71, 14, 12, + 99, 94, 28, 17, 27, 81, 96, 67, 74, 76, 74, 8, 75, 45, 25, 79, 0, + 97, 28, 41, 58, 39, 55, 100, 45, 11, 23, 15, 48, 37, 27, 46, 97, 56, + 63, 90, 36, 24, 56, 76, 0, 96, 85, 41, 40, 9, 19, 6, 6, 14, 47, + 30, 19, 2, 96, 64, 80, 18, 45, 27, 21, 72, 39, 17, 94, 1, 6, 96, + 93, 28, 72, 59, 90, 56, 100, 96, 31, 86, 1, 3, 66, 15, 0}, + {85, 17, 96, 14, 63, 81, 59, 90, 1, 97, 28, 19, 57, 96, 92, 52, 54, + 87, 23, 12, 76, 45, 79, 72, 43, 64, 39, 46, 29, 54, 12, 80, 37, 8, + 60, 100, 89, 85, 55, 56, 47, 49, 75, 3, 45, 33, 56, 99, 19, 45, 78, + 61, 91, 56, 99, 33, 86, 4, 45, 81, 58, 58, 60, 96, 32, 19, 61, 87, + 70, 16, 42, 16, 65, 84, 20, 76, 83, 42, 41, 68, 87, 18, 28, 77, 40, + 94, 76, 25, 98, 88, 5, 21, 11, 31, 16, 43, 16, 44, 29, 86}, + {60, 37, 1, 24, 20, 88, 67, 69, 29, 7, 36, 16, 25, 65, 59, 65, 24, + 1, 56, 21, 89, 61, 42, 100, 58, 25, 8, 74, 69, 3, 25, 95, 40, 26, + 85, 27, 81, 51, 96, 9, 58, 32, 25, 49, 63, 51, 80, 87, 52, 35, 74, + 40, 62, 82, 5, 19, 73, 13, 59, 7, 16, 84, 1, 56, 77, 53, 49, 57, + 3, 45, 66, 28, 43, 58, 77, 72, 8, 57, 58, 60, 92, 98, 66, 20, 79, + 71, 39, 52, 84, 65, 59, 100, 48, 27, 21, 91, 80, 71, 47, 83}, + {82, 80, 10, 24, 37, 54, 62, 45, 10, 86, 71, 68, 83, 36, 88, 27, 6, + 94, 79, 56, 58, 4, 55, 72, 98, 42, 63, 77, 12, 9, 25, 60, 89, 2, + 50, 92, 56, 11, 2, 32, 97, 73, 100, 79, 75, 88, 73, 47, 47, 17, 2, + 4, 21, 23, 42, 18, 66, 4, 61, 44, 81, 87, 71, 35, 89, 20, 27, 10, + 32, 96, 42, 95, 69, 41, 40, 9, 95, 12, 23, 41, 29, 25, 11, 17, 15, + 54, 1, 47, 24, 63, 57, 4, 49, 27, 40, 3, 48, 33, 13, 46}, + {95, 55, 40, 29, 96, 46, 39, 57, 58, 62, 98, 54, 53, 76, 71, 68, 29, + 72, 81, 53, 34, 38, 24, 49, 65, 30, 52, 79, 29, 31, 24, 23, 86, 
31, + 53, 48, 77, 92, 4, 1, 19, 68, 55, 72, 9, 92, 6, 38, 63, 87, 58, + 64, 24, 82, 79, 56, 78, 98, 34, 6, 28, 25, 29, 81, 22, 82, 28, 65, + 39, 99, 66, 58, 32, 87, 97, 42, 78, 2, 46, 7, 55, 3, 71, 46, 51, + 49, 1, 28, 46, 1, 34, 41, 26, 30, 21, 48, 11, 49, 80, 17}, + {13, 45, 75, 11, 99, 37, 53, 76, 39, 66, 83, 95, 35, 19, 40, 87, 69, + 7, 81, 81, 8, 82, 21, 35, 11, 42, 49, 89, 57, 95, 5, 36, 40, 47, + 14, 38, 84, 33, 80, 23, 99, 29, 84, 34, 48, 90, 87, 16, 97, 67, 64, + 71, 48, 51, 72, 59, 60, 88, 48, 83, 82, 53, 86, 21, 66, 100, 25, 50, + 32, 72, 39, 31, 0, 22, 65, 48, 78, 51, 31, 40, 84, 61, 10, 32, 11, + 83, 57, 71, 70, 4, 20, 51, 24, 5, 39, 90, 4, 30, 5, 36}, + {1, 44, 33, 68, 66, 64, 16, 9, 81, 13, 49, 65, 74, 60, 97, 51, 42, + 19, 89, 11, 24, 8, 28, 14, 13, 67, 70, 84, 64, 76, 86, 65, 19, 19, + 100, 52, 83, 15, 61, 64, 95, 10, 95, 34, 70, 57, 85, 78, 76, 73, 55, + 66, 47, 83, 80, 60, 16, 16, 9, 80, 92, 96, 10, 77, 14, 9, 28, 63, + 91, 56, 93, 85, 32, 87, 18, 68, 43, 70, 45, 19, 42, 66, 85, 56, 48, + 31, 82, 30, 47, 92, 9, 4, 87, 87, 81, 67, 96, 76, 29, 87}, + {31, 89, 37, 63, 75, 22, 97, 85, 92, 41, 70, 100, 73, 20, 55, 20, 51, + 37, 17, 64, 28, 93, 68, 81, 79, 15, 47, 75, 91, 42, 27, 88, 30, 64, + 16, 72, 52, 12, 56, 43, 19, 25, 43, 92, 45, 64, 78, 63, 0, 95, 26, + 95, 54, 61, 75, 32, 76, 88, 73, 32, 30, 66, 86, 26, 97, 1, 98, 48, + 80, 19, 92, 99, 10, 0, 56, 56, 64, 33, 85, 65, 95, 77, 59, 48, 3, + 0, 46, 45, 88, 19, 77, 84, 51, 62, 10, 47, 29, 74, 96, 8}, + {94, 53, 73, 3, 53, 28, 25, 16, 62, 76, 47, 22, 53, 73, 70, 22, 73, + 15, 68, 60, 0, 10, 44, 52, 73, 54, 65, 68, 94, 60, 77, 53, 79, 15, + 23, 31, 44, 48, 14, 72, 91, 27, 94, 9, 100, 29, 31, 72, 44, 99, 32, + 11, 9, 76, 29, 48, 96, 94, 15, 55, 20, 58, 8, 99, 40, 31, 97, 84, + 45, 77, 55, 35, 3, 14, 44, 3, 43, 42, 75, 87, 40, 73, 64, 15, 14, + 93, 29, 76, 53, 11, 31, 73, 69, 39, 37, 8, 70, 100, 58, 81}, + {76, 79, 16, 80, 93, 26, 49, 35, 68, 23, 89, 75, 63, 18, 56, 77, 11, + 86, 53, 30, 97, 84, 2, 31, 89, 5, 6, 24, 5, 64, 4, 47, 43, 87, + 26, 1, 13, 41, 3, 47, 65, 92, 88, 94, 9, 44, 70, 87, 29, 89, 16, + 25, 72, 85, 56, 26, 57, 62, 50, 62, 93, 55, 8, 1, 7, 1, 2, 20, + 42, 5, 34, 73, 63, 21, 66, 39, 31, 2, 25, 60, 91, 8, 51, 29, 59, + 74, 55, 15, 1, 5, 77, 94, 26, 52, 95, 33, 19, 64, 20, 27}, + {35, 54, 0, 99, 41, 32, 37, 73, 34, 28, 99, 92, 2, 50, 20, 62, 23, + 75, 77, 24, 46, 20, 85, 72, 38, 45, 72, 57, 75, 92, 84, 10, 11, 50, + 75, 18, 83, 78, 91, 83, 72, 56, 74, 75, 72, 60, 36, 95, 1, 79, 85, + 47, 99, 35, 19, 36, 47, 91, 59, 21, 48, 43, 31, 59, 59, 72, 77, 7, + 49, 34, 91, 21, 56, 30, 96, 27, 57, 98, 88, 58, 76, 38, 4, 41, 74, + 90, 43, 20, 46, 2, 7, 94, 11, 39, 18, 70, 77, 62, 78, 26}, + {62, 34, 47, 17, 30, 8, 10, 87, 72, 98, 44, 47, 1, 15, 54, 75, 4, + 98, 61, 17, 100, 69, 10, 10, 74, 96, 46, 50, 23, 23, 42, 85, 23, 55, + 68, 54, 29, 44, 40, 0, 41, 51, 14, 42, 66, 68, 84, 36, 31, 10, 53, + 30, 45, 30, 6, 85, 25, 53, 1, 14, 42, 43, 65, 66, 65, 32, 86, 94, + 42, 25, 95, 83, 42, 8, 91, 74, 42, 40, 10, 74, 51, 63, 70, 62, 59, + 77, 47, 50, 96, 48, 64, 3, 57, 28, 35, 21, 26, 20, 15, 68}, + {12, 9, 16, 54, 84, 74, 28, 92, 13, 4, 65, 30, 33, 1, 93, 93, 78, + 5, 42, 39, 53, 73, 42, 9, 0, 78, 98, 94, 98, 12, 61, 76, 88, 44, + 30, 37, 17, 24, 28, 97, 28, 60, 27, 61, 27, 86, 53, 4, 91, 62, 9, + 9, 34, 17, 85, 0, 61, 82, 94, 25, 60, 21, 0, 13, 65, 30, 50, 48, + 54, 45, 44, 48, 71, 37, 9, 98, 89, 62, 68, 45, 23, 43, 54, 23, 60, + 5, 24, 21, 87, 17, 12, 13, 4, 12, 26, 69, 9, 43, 83, 29}, + {88, 94, 78, 24, 30, 87, 21, 86, 14, 55, 30, 
4, 98, 51, 27, 57, 56, + 17, 44, 8, 35, 56, 21, 39, 69, 14, 75, 44, 57, 23, 73, 10, 16, 50, + 34, 13, 2, 55, 99, 17, 9, 95, 21, 6, 45, 14, 29, 0, 32, 74, 9, + 33, 96, 97, 38, 30, 10, 79, 74, 33, 2, 47, 43, 85, 63, 77, 98, 66, + 98, 62, 83, 73, 57, 70, 45, 68, 50, 75, 69, 82, 14, 44, 81, 9, 6, + 19, 40, 84, 64, 80, 16, 66, 26, 60, 51, 90, 36, 14, 55, 34}, + {43, 3, 73, 100, 73, 18, 67, 89, 93, 1, 37, 6, 11, 17, 82, 85, 2, + 88, 68, 67, 68, 50, 99, 60, 9, 15, 49, 12, 30, 70, 12, 73, 73, 85, + 38, 11, 2, 71, 67, 95, 39, 3, 67, 16, 20, 15, 0, 90, 69, 34, 22, + 36, 85, 20, 63, 94, 36, 11, 72, 32, 48, 84, 71, 87, 69, 75, 65, 37, + 11, 31, 99, 50, 34, 31, 33, 20, 46, 100, 76, 15, 34, 98, 17, 18, 18, + 80, 78, 20, 58, 16, 18, 72, 100, 55, 58, 34, 96, 89, 72, 6}, + {86, 36, 23, 86, 67, 56, 6, 80, 21, 48, 61, 55, 46, 78, 39, 30, 24, + 84, 50, 48, 100, 34, 19, 65, 89, 43, 100, 84, 32, 37, 56, 17, 73, 79, + 3, 5, 0, 76, 85, 22, 23, 45, 43, 35, 23, 83, 65, 13, 32, 14, 61, + 31, 14, 46, 96, 2, 89, 61, 52, 87, 64, 8, 4, 2, 53, 74, 8, 54, + 15, 93, 42, 38, 4, 85, 40, 94, 67, 4, 6, 99, 86, 33, 96, 100, 79, + 58, 69, 33, 85, 20, 20, 49, 95, 91, 17, 14, 64, 25, 68, 79}, + {85, 76, 83, 89, 60, 22, 82, 94, 27, 54, 58, 79, 87, 54, 78, 31, 78, + 12, 64, 62, 100, 84, 10, 94, 74, 28, 7, 37, 19, 41, 82, 70, 16, 31, + 58, 43, 19, 5, 36, 12, 59, 94, 91, 11, 13, 69, 42, 91, 81, 6, 53, + 80, 90, 29, 40, 30, 23, 13, 33, 9, 21, 15, 79, 3, 12, 37, 46, 31, + 8, 48, 44, 34, 42, 34, 45, 21, 69, 54, 12, 16, 60, 65, 96, 15, 60, + 1, 45, 84, 82, 45, 93, 2, 60, 71, 5, 38, 74, 18, 69, 49}, + {66, 12, 83, 74, 47, 94, 96, 15, 47, 74, 31, 6, 4, 94, 89, 64, 61, + 100, 13, 42, 44, 72, 44, 70, 9, 16, 7, 83, 34, 77, 98, 66, 55, 80, + 40, 1, 74, 1, 84, 20, 41, 81, 94, 45, 40, 48, 8, 1, 47, 89, 43, + 58, 60, 54, 27, 69, 36, 1, 18, 70, 44, 15, 1, 99, 96, 7, 0, 35, + 75, 50, 21, 15, 30, 14, 60, 37, 62, 35, 38, 76, 23, 47, 33, 49, 67, + 60, 18, 2, 27, 2, 38, 71, 17, 6, 70, 79, 13, 36, 80, 89}, + {86, 1, 3, 82, 15, 30, 18, 44, 31, 22, 19, 54, 36, 52, 69, 69, 78, + 53, 72, 5, 55, 76, 42, 73, 82, 11, 17, 62, 47, 98, 50, 99, 99, 19, + 81, 80, 15, 65, 23, 46, 54, 8, 66, 56, 60, 35, 24, 4, 88, 62, 76, + 43, 38, 17, 82, 86, 29, 65, 47, 42, 62, 63, 41, 26, 49, 88, 6, 64, + 18, 96, 10, 72, 4, 42, 94, 64, 77, 18, 34, 31, 80, 9, 40, 84, 27, + 21, 70, 22, 86, 83, 64, 14, 46, 4, 40, 61, 92, 46, 24, 10}, + {42, 0, 48, 12, 9, 42, 76, 86, 26, 77, 83, 5, 86, 22, 56, 79, 43, + 92, 0, 96, 40, 65, 76, 52, 35, 15, 12, 94, 28, 3, 3, 36, 3, 17, + 48, 79, 25, 90, 65, 51, 66, 47, 23, 18, 36, 79, 97, 79, 36, 98, 40, + 76, 28, 15, 28, 63, 98, 40, 56, 25, 43, 25, 27, 13, 9, 75, 92, 34, + 30, 22, 86, 97, 36, 75, 81, 72, 19, 77, 16, 55, 40, 23, 97, 68, 4, + 24, 31, 1, 31, 53, 93, 40, 79, 19, 19, 88, 60, 78, 88, 91}, + {66, 39, 53, 1, 13, 33, 39, 32, 76, 22, 53, 16, 11, 16, 84, 15, 40, + 81, 17, 37, 34, 76, 44, 79, 96, 63, 32, 21, 6, 86, 11, 73, 25, 30, + 40, 4, 29, 46, 3, 5, 68, 56, 21, 79, 72, 71, 60, 79, 18, 77, 82, + 52, 53, 25, 97, 14, 55, 95, 35, 61, 80, 13, 33, 4, 9, 74, 9, 39, + 19, 12, 10, 53, 34, 98, 98, 73, 68, 57, 17, 52, 0, 99, 3, 19, 24, + 66, 100, 79, 60, 34, 39, 40, 13, 39, 44, 23, 79, 19, 28, 64}, + {98, 38, 16, 32, 35, 80, 71, 69, 36, 88, 21, 2, 86, 91, 21, 76, 57, + 87, 20, 83, 21, 26, 22, 0, 65, 33, 90, 9, 18, 17, 73, 16, 55, 55, + 14, 56, 34, 85, 92, 36, 38, 79, 5, 90, 35, 93, 66, 58, 80, 86, 41, + 67, 78, 29, 67, 8, 62, 57, 17, 47, 74, 90, 63, 96, 44, 43, 17, 44, + 27, 75, 47, 65, 53, 52, 54, 55, 10, 86, 12, 90, 38, 53, 56, 15, 49, + 23, 24, 77, 46, 
41, 23, 19, 98, 86, 81, 7, 95, 65, 18, 21}, + {39, 31, 52, 59, 49, 73, 13, 59, 24, 25, 49, 62, 45, 4, 44, 60, 94, + 34, 36, 39, 41, 60, 25, 4, 11, 72, 12, 6, 36, 97, 94, 76, 27, 12, + 34, 76, 85, 13, 34, 75, 4, 83, 3, 49, 54, 47, 8, 47, 47, 11, 53, + 88, 71, 44, 59, 48, 15, 71, 54, 52, 67, 14, 27, 94, 26, 27, 69, 77, + 6, 69, 51, 10, 52, 54, 26, 72, 67, 0, 85, 80, 11, 37, 34, 48, 81, + 93, 97, 97, 29, 16, 14, 96, 30, 7, 55, 56, 34, 90, 99, 6}, + {58, 50, 16, 76, 70, 8, 47, 3, 9, 32, 49, 87, 69, 83, 35, 16, 75, + 98, 79, 3, 13, 93, 65, 44, 100, 86, 66, 100, 75, 65, 5, 33, 81, 88, + 75, 16, 97, 22, 86, 72, 54, 35, 58, 89, 17, 59, 71, 59, 56, 49, 28, + 70, 41, 60, 80, 40, 45, 11, 5, 20, 42, 10, 19, 22, 99, 94, 5, 61, + 82, 91, 32, 1, 25, 90, 57, 9, 49, 27, 34, 71, 43, 62, 40, 50, 21, + 86, 91, 33, 98, 62, 53, 39, 73, 38, 28, 37, 98, 33, 98, 80}, + {90, 29, 47, 82, 85, 3, 57, 100, 98, 91, 71, 40, 18, 77, 90, 6, 63, + 46, 39, 26, 8, 58, 31, 47, 96, 59, 84, 59, 58, 47, 38, 48, 76, 52, + 96, 26, 55, 52, 26, 52, 42, 63, 58, 26, 5, 48, 32, 68, 60, 37, 60, + 68, 95, 92, 14, 56, 16, 64, 15, 75, 10, 19, 89, 52, 71, 84, 79, 26, + 1, 71, 44, 43, 100, 2, 35, 4, 16, 68, 39, 76, 4, 99, 10, 100, 56, + 91, 21, 73, 55, 36, 13, 31, 56, 1, 84, 93, 51, 28, 85, 52}, + {65, 29, 61, 64, 98, 96, 68, 13, 29, 73, 55, 34, 38, 65, 100, 94, 56, + 87, 32, 77, 23, 45, 7, 45, 12, 91, 37, 29, 85, 22, 47, 49, 17, 74, + 12, 14, 70, 47, 94, 65, 86, 48, 99, 23, 13, 64, 84, 35, 51, 15, 11, + 40, 27, 18, 51, 5, 76, 88, 1, 26, 76, 48, 76, 59, 22, 54, 73, 58, + 67, 32, 22, 53, 81, 88, 76, 60, 17, 25, 95, 34, 7, 5, 40, 34, 90, + 91, 5, 31, 45, 6, 58, 20, 21, 33, 80, 9, 53, 18, 67, 20}, + {51, 55, 73, 31, 42, 14, 57, 26, 40, 51, 60, 13, 22, 0, 47, 78, 91, + 18, 9, 1, 92, 33, 22, 79, 32, 68, 88, 85, 86, 20, 71, 2, 75, 43, + 100, 84, 24, 56, 9, 30, 6, 35, 43, 95, 1, 56, 73, 59, 40, 48, 60, + 31, 81, 82, 9, 12, 15, 97, 63, 1, 83, 34, 70, 58, 43, 70, 41, 67, + 25, 16, 63, 99, 17, 5, 93, 19, 27, 31, 78, 68, 79, 37, 99, 59, 86, + 75, 37, 0, 37, 67, 68, 20, 0, 38, 78, 43, 7, 85, 77, 99}, + {67, 39, 97, 84, 11, 90, 2, 38, 20, 46, 5, 100, 50, 71, 24, 35, 45, + 28, 1, 82, 95, 36, 68, 61, 40, 11, 70, 47, 62, 46, 11, 28, 52, 8, + 79, 63, 98, 81, 67, 84, 94, 39, 49, 43, 9, 40, 78, 20, 68, 45, 68, + 28, 81, 36, 89, 20, 47, 58, 33, 9, 71, 45, 37, 22, 53, 82, 51, 16, + 29, 84, 100, 22, 22, 15, 65, 98, 55, 8, 17, 22, 19, 86, 16, 0, 21, + 4, 87, 34, 28, 20, 43, 99, 31, 47, 87, 50, 28, 3, 66, 57}, + {88, 31, 45, 76, 46, 9, 74, 0, 84, 91, 89, 3, 42, 4, 3, 63, 8, + 56, 98, 3, 76, 6, 1, 73, 53, 55, 22, 48, 58, 54, 71, 11, 86, 16, + 88, 98, 92, 61, 99, 76, 17, 53, 79, 60, 58, 48, 89, 32, 3, 52, 35, + 46, 59, 3, 18, 78, 24, 7, 92, 48, 61, 63, 60, 12, 79, 47, 10, 70, + 74, 75, 11, 91, 27, 90, 16, 51, 3, 5, 84, 74, 57, 85, 19, 15, 54, + 3, 60, 44, 10, 51, 93, 38, 13, 52, 50, 58, 65, 60, 28, 38}, + {34, 39, 95, 28, 96, 11, 79, 99, 16, 28, 38, 73, 80, 57, 55, 100, 27, + 14, 44, 3, 65, 36, 41, 79, 54, 92, 2, 18, 17, 30, 56, 18, 36, 50, + 46, 98, 27, 24, 62, 43, 19, 0, 83, 99, 23, 37, 98, 50, 51, 41, 20, + 82, 43, 61, 26, 97, 18, 29, 14, 2, 25, 36, 20, 61, 53, 66, 24, 80, + 56, 87, 90, 41, 87, 72, 39, 9, 8, 3, 26, 25, 44, 46, 73, 54, 73, + 100, 50, 58, 95, 31, 60, 19, 67, 80, 47, 86, 11, 71, 32, 33}, + {23, 21, 75, 9, 93, 80, 86, 67, 83, 11, 58, 94, 23, 30, 47, 96, 96, + 63, 19, 56, 94, 79, 42, 27, 24, 89, 12, 1, 25, 44, 35, 49, 65, 76, + 58, 23, 21, 9, 90, 4, 87, 13, 64, 9, 10, 77, 72, 72, 39, 91, 28, + 33, 70, 70, 60, 60, 24, 72, 62, 49, 83, 63, 64, 47, 4, 89, 
37, 25, + 98, 26, 96, 85, 6, 25, 94, 16, 1, 31, 54, 41, 22, 48, 74, 58, 17, + 100, 17, 7, 71, 45, 57, 19, 74, 20, 67, 78, 75, 3, 70, 73}, + {96, 65, 57, 68, 57, 16, 50, 58, 14, 4, 99, 36, 52, 38, 60, 36, 37, + 43, 43, 75, 89, 66, 94, 62, 53, 60, 6, 27, 29, 76, 100, 92, 6, 22, + 59, 63, 5, 9, 21, 19, 13, 86, 21, 31, 24, 47, 67, 61, 90, 10, 35, + 44, 42, 29, 73, 95, 55, 79, 22, 51, 54, 88, 42, 26, 10, 0, 56, 82, + 9, 77, 67, 89, 28, 88, 20, 52, 34, 53, 80, 90, 29, 14, 34, 72, 9, + 6, 66, 65, 85, 54, 82, 4, 42, 23, 97, 18, 23, 52, 100, 100}, + {95, 66, 54, 23, 19, 40, 75, 19, 60, 20, 8, 89, 35, 42, 60, 10, 48, + 93, 41, 99, 46, 22, 69, 54, 45, 66, 38, 35, 17, 37, 0, 12, 69, 54, + 35, 54, 61, 76, 73, 20, 97, 48, 8, 98, 90, 35, 7, 4, 94, 15, 69, + 5, 37, 38, 60, 83, 3, 98, 84, 20, 1, 84, 99, 36, 3, 100, 57, 64, + 76, 96, 50, 38, 43, 25, 35, 100, 60, 8, 70, 53, 23, 38, 58, 27, 42, + 84, 76, 11, 48, 59, 99, 15, 8, 97, 51, 11, 97, 7, 42, 38}, + {70, 58, 76, 12, 83, 77, 11, 42, 51, 47, 61, 75, 86, 86, 68, 94, 69, + 43, 5, 16, 1, 3, 31, 9, 100, 49, 87, 62, 22, 95, 100, 92, 53, 41, + 71, 35, 17, 48, 44, 69, 96, 4, 9, 47, 56, 77, 40, 25, 86, 45, 7, + 87, 48, 5, 62, 14, 20, 48, 76, 8, 43, 76, 67, 62, 16, 37, 97, 0, + 85, 6, 35, 80, 78, 10, 26, 33, 53, 33, 24, 38, 78, 32, 24, 93, 3, + 52, 6, 90, 100, 48, 98, 8, 90, 64, 70, 6, 67, 33, 73, 52}, + {39, 7, 98, 16, 84, 91, 16, 36, 23, 40, 74, 67, 38, 64, 59, 41, 15, + 31, 97, 81, 80, 61, 56, 35, 24, 25, 41, 92, 24, 80, 9, 30, 53, 6, + 12, 36, 97, 28, 72, 86, 69, 11, 53, 6, 75, 78, 14, 56, 76, 10, 37, + 55, 37, 93, 56, 62, 84, 98, 19, 75, 43, 28, 4, 97, 0, 83, 32, 98, + 11, 71, 49, 80, 82, 1, 52, 23, 80, 66, 45, 55, 43, 48, 76, 80, 40, + 31, 7, 91, 95, 93, 31, 38, 20, 1, 0, 88, 84, 32, 51, 95}, + {2, 100, 40, 85, 1, 59, 74, 47, 91, 18, 68, 33, 67, 9, 80, 73, 6, + 53, 29, 1, 46, 60, 5, 32, 61, 5, 86, 11, 3, 36, 72, 6, 36, 12, + 57, 37, 71, 97, 50, 61, 14, 17, 61, 47, 93, 6, 20, 99, 25, 15, 66, + 37, 76, 71, 36, 2, 42, 21, 80, 12, 58, 52, 18, 94, 30, 41, 97, 67, + 3, 12, 94, 17, 96, 54, 31, 88, 26, 51, 86, 18, 66, 52, 55, 7, 89, + 91, 77, 98, 79, 56, 9, 36, 74, 94, 96, 3, 34, 92, 70, 37}, + {3, 64, 20, 65, 84, 51, 52, 77, 68, 37, 95, 0, 55, 15, 7, 10, 6, + 50, 7, 85, 73, 16, 87, 46, 9, 82, 50, 9, 39, 86, 12, 8, 49, 32, + 73, 100, 50, 24, 76, 17, 27, 70, 17, 83, 51, 92, 93, 23, 7, 66, 74, + 80, 82, 60, 26, 57, 41, 42, 66, 80, 27, 78, 88, 77, 76, 26, 42, 25, + 50, 17, 9, 78, 53, 26, 26, 3, 84, 85, 27, 92, 50, 0, 71, 31, 27, + 63, 88, 34, 4, 19, 14, 32, 97, 68, 75, 72, 95, 16, 64, 10}, + {100, 73, 88, 52, 65, 80, 21, 49, 64, 14, 6, 13, 15, 77, 10, 8, 6, + 64, 42, 10, 83, 22, 8, 45, 91, 49, 84, 51, 65, 47, 27, 30, 86, 82, + 82, 50, 61, 70, 65, 92, 84, 71, 71, 65, 14, 82, 73, 20, 11, 15, 97, + 61, 37, 5, 72, 94, 54, 55, 10, 86, 68, 38, 15, 53, 19, 64, 70, 80, + 33, 34, 37, 16, 72, 8, 82, 86, 56, 54, 5, 33, 69, 1, 94, 73, 73, + 66, 66, 27, 87, 77, 79, 55, 14, 94, 74, 100, 57, 43, 45, 90}, + {44, 83, 73, 15, 91, 54, 0, 46, 74, 72, 79, 9, 39, 39, 82, 12, 71, + 13, 5, 57, 90, 84, 11, 70, 77, 52, 69, 0, 95, 14, 56, 38, 63, 28, + 19, 53, 48, 19, 65, 89, 57, 9, 98, 97, 14, 45, 8, 85, 58, 80, 42, + 14, 63, 19, 50, 5, 71, 86, 72, 66, 66, 28, 70, 28, 56, 90, 81, 71, + 75, 11, 59, 32, 87, 56, 28, 1, 67, 2, 86, 91, 82, 27, 71, 10, 47, + 21, 82, 17, 6, 54, 49, 38, 82, 86, 66, 3, 75, 12, 74, 15}, + {23, 99, 47, 9, 20, 75, 10, 87, 43, 63, 44, 91, 90, 14, 0, 2, 35, + 83, 87, 7, 2, 1, 45, 84, 87, 77, 53, 27, 89, 94, 43, 78, 92, 90, + 88, 12, 31, 64, 65, 74, 93, 8, 65, 49, 23, 31, 
51, 24, 80, 3, 99, + 82, 5, 9, 31, 92, 87, 85, 19, 41, 78, 62, 19, 35, 17, 73, 13, 48, + 2, 79, 89, 96, 53, 19, 44, 42, 50, 61, 67, 30, 65, 31, 78, 36, 40, + 9, 94, 93, 60, 12, 34, 3, 40, 53, 38, 24, 92, 52, 72, 94}, + {97, 60, 89, 15, 79, 99, 58, 96, 26, 91, 92, 91, 21, 69, 93, 27, 44, + 86, 20, 3, 65, 54, 6, 71, 73, 11, 95, 64, 29, 67, 23, 92, 93, 79, + 6, 38, 77, 30, 33, 2, 20, 91, 59, 7, 59, 51, 1, 3, 3, 21, 73, + 68, 41, 46, 4, 80, 57, 100, 9, 86, 32, 32, 43, 24, 10, 49, 28, 88, + 80, 27, 56, 66, 17, 82, 40, 77, 32, 41, 46, 1, 28, 85, 35, 69, 30, + 40, 14, 53, 39, 23, 4, 71, 55, 47, 61, 66, 97, 56, 19, 42}, + {83, 41, 74, 0, 22, 80, 77, 21, 20, 89, 22, 14, 73, 58, 83, 70, 98, + 63, 22, 2, 86, 27, 39, 41, 40, 66, 73, 36, 21, 92, 44, 4, 32, 85, + 4, 21, 64, 47, 42, 85, 1, 64, 65, 40, 88, 48, 9, 51, 77, 99, 53, + 63, 92, 58, 3, 31, 24, 76, 34, 11, 33, 44, 15, 31, 28, 86, 52, 93, + 99, 94, 43, 100, 24, 7, 40, 11, 21, 15, 63, 99, 13, 82, 61, 4, 40, + 30, 2, 30, 72, 36, 41, 71, 80, 23, 1, 8, 8, 20, 67, 7}}; + +struct _NODE { + long iDist; + long iPrev; +}; +typedef struct _NODE NODE; + +struct _QITEM { + long iNode; + long iDist; + long iPrev; + struct _QITEM *qNext; +}; +typedef struct _QITEM QITEM; + +QITEM *qHead = NULL; + +long AdjMatrix[NUM_NODES][NUM_NODES]; + +long g_qCount = 0; +NODE rgnNodes[NUM_NODES]; +long ch; +long iPrev, iNode; +long i, iCost, iDist; + +void print_path(NODE *rgnNodes, long chNode) { + if (rgnNodes[chNode].iPrev != NONE) { + print_path(rgnNodes, rgnNodes[chNode].iPrev); + } + printf(" %ld", chNode); +} + +void enqueue(long iNode, long iDist, long iPrev) { + QITEM *qNew = (QITEM *)s_malloc(sizeof(QITEM)); + QITEM *qLast = qHead; + + if (!qNew) { + printf("Out of memory\n"); + while (1) + ; + } + qNew->iNode = iNode; + qNew->iDist = iDist; + qNew->iPrev = iPrev; + qNew->qNext = NULL; + + if (!qLast) { + qHead = qNew; + } else { + while (qLast->qNext) + qLast = qLast->qNext; + qLast->qNext = qNew; + } + g_qCount++; + // ASSERT(g_qCount); +} + +void dequeue(long *piNode, long *piDist, long *piPrev) { + QITEM *qKill = qHead; + + if (qHead) { + // ASSERT(g_qCount); + *piNode = qHead->iNode; + *piDist = qHead->iDist; + *piPrev = qHead->iPrev; + qHead = qHead->qNext; + s_free(qKill); + g_qCount--; + } +} + +int qcount(void) { return (g_qCount); } + +void dijkstra(long chStart, long chEnd) { + for (ch = 0; ch < NUM_NODES; ch++) { + rgnNodes[ch].iDist = NONE; + rgnNodes[ch].iPrev = NONE; + } + + if (chStart == chEnd) { + printf("Shortest path is 0 in cost. 
Just stay where you are.\n"); + } else { + rgnNodes[chStart].iDist = 0; + rgnNodes[chStart].iPrev = NONE; + + enqueue(chStart, 0, NONE); + + while (qcount() > 0) { + dequeue(&iNode, &iDist, &iPrev); + for (i = 0; i < NUM_NODES; i++) { + if ((iCost = AdjMatrix[iNode][i]) != NONE) { + if ((NONE == rgnNodes[i].iDist) || + (rgnNodes[i].iDist > (iCost + iDist))) { + rgnNodes[i].iDist = iDist + iCost; + rgnNodes[i].iPrev = iNode; + enqueue(i, iDist + iCost, iNode); + } + } + } + } + + printf("Shortest path is %ld in cost.\n", rgnNodes[chEnd].iDist); + printf("Path is: \n"); + print_path(rgnNodes, chEnd); + printf("\n"); + } +} + +int main(int argc, char *argv[]) { + // printf("Switched stack\n"); + long i, j; + + /* make a fully connected matrix */ + for (i = 0; i < NUM_NODES; i++) { + for (j = 0; j < NUM_NODES; j++) { + /* make it more sparce */ + // printf("Sanity Check: %d, %d \n", sanity, *rot_cnt); + AdjMatrix[i][j] = dijkstra_input_data[i][j]; + } + } + + /* finds 10 shortest paths between nodes */ + for (i = 0, j = NUM_NODES / 2; i < 100; i++, j++) { + j = j % NUM_NODES; + // printf("Sanity Check: %d, %d \n", sanity, *rot_cnt); + dijkstra(i, j); + } +} + +void print_dijkstra() { + for (int i = 0; i < 100; i++) { + printf("Data @ %d : %d\n", i, (int)dijkstra_input_data[i][0]); + } +} diff --git a/test/src/duff.c b/test/src/duff.c new file mode 100755 index 0000000..a9cc51c --- /dev/null +++ b/test/src/duff.c @@ -0,0 +1,86 @@ +/* $Id: duff.c,v 1.2 2005/04/04 11:34:58 csg Exp $ */ + +/*---------------------------------------------------------------------- + * WCET Benchmark created by Jakob Engblom, Uppsala university, + * February 2000. + * + * The purpose of this benchmark is to force the compiler to emit an + * unstructured loop, which is usually problematic for WCET tools to + * handle. + * + * The execution time should be constant. + * + * The original code is "Duff's Device", see the Jargon File, e.g. at + * http://www.tf.hut.fi/cgi-bin/jargon. Created in the early 1980s + * as a way to express loop unrolling in C. + * + *----------------------------------------------------------------------*/ + +#define ARRAYSIZE 100 +#define INVOCATION_COUNT 43 /* exec time depends on this one! 
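With a count of 43, duffcopy below enters the switch at case 3 and the do-while runs 6 times: one partial pass of 3 copies plus 5 full passes of 8. Functionally it is just a byte copy, roughly: for (i = 0; i < count; i++) to[i] = from[i]; the interleaved switch/do-while exists only to give WCET tools an unstructured loop.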
*/ + + +void duffcopy( char *to, char *from, int count) +{ + int n=(count+7)/8; + switch(count%8){ + case 0: do{ *to++ = *from++; + case 7: *to++ = *from++; + case 6: *to++ = *from++; + case 5: *to++ = *from++; + case 4: *to++ = *from++; + case 3: *to++ = *from++; + case 2: *to++ = *from++; + case 1: *to++ = *from++; + } while(--n>0); + } +} + + +void initialize( char *arr, int length) +{ + int i; + for(i=0;i> 15); +} + + +/***************************************************** +* Dot Product * +*****************************************************/ +long int +mac(const short *a, const short *b, long int sqr, long int *sum) +{ + long int i; + long int dotp = *sum; + + for (i = 0; i < 150; i++) { + dotp += b[i] * a[i]; + sqr += b[i] * b[i]; + } + + *sum = dotp; + return sqr; +} + + +/***************************************************** +* FIR Filter * +*****************************************************/ +void +fir(const short array1[], const short coeff[], long int output[]) +{ + long int i, j, sum; + + for (i = 0; i < N - ORDER; i++) { + sum = 0; + for (j = 0; j < ORDER; j++) { + sum += array1[i + j] * coeff[j]; + } + output[i] = sum >> 15; + } +} + +/**************************************************** +* FIR Filter with Redundant Load Elimination + +By doing two outer loops simultaneously, you can potentially reuse data (depending on the DSP architecture). +x and h only need to be loaded once, therefore reducing redundant loads. +This reduces memory bandwidth and power. +*****************************************************/ +void +fir_no_red_ld(const short x[], const short h[], long int y[]) +{ + long int i, j; + long int sum0, sum1; + short x0, x1, h0, h1; + for (j = 0; j < 100; j += 2) { + sum0 = 0; + sum1 = 0; + x0 = x[j]; + for (i = 0; i < 32; i += 2) { + x1 = x[j + i + 1]; + h0 = h[i]; + sum0 += x0 * h0; + sum1 += x1 * h0; + x0 = x[j + i + 2]; + h1 = h[i + 1]; + sum0 += x1 * h1; + sum1 += x0 * h1; + } + y[j] = sum0 >> 15; + y[j + 1] = sum1 >> 15; + } +} + +/******************************************************* +* Lattice Synthesis * +* This function doesn't follow the typical DSP multiply two vector operation, but it will point out the compiler's flexibility ********************************************************/ +long int +latsynth(short b[], const short k[], long int n, long int f) +{ + long int i; + + f -= b[n - 1] * k[n - 1]; + for (i = n - 2; i >= 0; i--) { + f -= b[i] * k[i]; + b[i + 1] = b[i] + ((k[i] * (f >> 16)) >> 16); + } + b[0] = f >> 16; + return f; +} + +/***************************************************** +* IIR Filter * +*****************************************************/ +void +iir1(const short *coefs, const short *input, long int *optr, long int *state) +{ + long int x; + long int t; + long int n; + + x = input[0]; + for (n = 0; n < 50; n++) { + t = x + ((coefs[2] * state[0] + coefs[3] * state[1]) >> 15); + x = t + ((coefs[0] * state[0] + coefs[1] * state[1]) >> 15); + state[1] = state[0]; + state[0] = t; + coefs += 4; /* point to next filter coefs */ + state += 2; /* point to next filter states */ + } + *optr++ = x; +} + +/***************************************************** +* Vocoder Codebook Search * +*****************************************************/ +long int +codebook(long int mask, long int bitchanged, long int numbasis, long int codeword, long int g, const short *d, short ddim, short theta) +/* + * dfm (mask=d bitchanged=1 numbasis=17 codeword=e[0] , g=d, d=a, ddim=c, + * theta =1 + */ + +{ + long int j; + long int tmpMask; + + tmpMask = 
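/* start one bit above the already-decided position; the loop below retains only its fixed iteration count, the body having been removed (see the note inside it) */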
mask << 1; + for (j = bitchanged + 1; j <= numbasis; j++) { + + + +/* + * The following code is removed since it gave a memory access exception. + * It is OK since the return value does not control the flow. + * The loop always iterates a fixed number of times independent of the loop body. + + if (theta == !(!(codeword & tmpMask))) + g += *(d + bitchanged * ddim + j); + else + g -= *(d + bitchanged * ddim + j); + tmpMask <<= 1; +*/ + } + return g; +} + + +/***************************************************** +* JPEG Discrete Cosine Transform * +*****************************************************/ +void +jpegdct(short *d, short *r) +{ + long int t[12]; + short i, j, k, m, n, p; + for (k = 1, m = 0, n = 13, p = 8; k <= 8; k += 7, m += 3, n += 3, p -= 7, d -= 64) { + for (i = 0; i < 8; i++, d += p) { + for (j = 0; j < 4; j++) { + t[j] = d[k * j] + d[k * (7 - j)]; + t[7 - j] = d[k * j] - d[k * (7 - j)]; + } + t[8] = t[0] + t[3]; + t[9] = t[0] - t[3]; + t[10] = t[1] + t[2]; + t[11] = t[1] - t[2]; + d[0] = (t[8] + t[10]) >> m; + d[4 * k] = (t[8] - t[10]) >> m; + t[8] = (short) (t[11] + t[9]) * r[10]; + d[2 * k] = t[8] + (short) ((t[9] * r[9]) >> n); + d[6 * k] = t[8] + (short) ((t[11] * r[11]) >> n); + t[0] = (short) (t[4] + t[7]) * r[2]; + t[1] = (short) (t[5] + t[6]) * r[0]; + t[2] = t[4] + t[6]; + t[3] = t[5] + t[7]; + t[8] = (short) (t[2] + t[3]) * r[8]; + t[2] = (short) t[2] * r[1] + t[8]; + t[3] = (short) t[3] * r[3] + t[8]; + d[7 * k] = (short) (t[4] * r[4] + t[0] + t[2]) >> n; + d[5 * k] = (short) (t[5] * r[6] + t[1] + t[3]) >> n; + d[3 * k] = (short) (t[6] * r[5] + t[1] + t[2]) >> n; + d[1 * k] = (short) (t[7] * r[7] + t[0] + t[3]) >> n; + } + } +} + + +int +main(void) +{ + short a[200] = {0x0000, 0x07ff, 0x0c00, 0x0800, 0x0200, 0xf800, 0xf300, 0x0400, 0x0000, + 0x07ff, 0x0c00, 0x0800, 0x0200, 0xf800, 0xf300, 0x0400, 0x0000, 0x07ff, 0x0c00, + 0x0800, 0x0200, 0xf800, 0xf300, 0x0400, 0x0000, 0x07ff, 0x0c00, 0x0800, 0x0200, + 0xf800, 0xf300, 0x0400, 0x0000, 0x07ff, 0x0c00, 0x0800, 0x0200, 0xf800, 0xf300, + 0x0400, 0x0000, 0x07ff, 0x0c00, 0x0800, 0x0200, 0xf800, 0xf300, 0x0400, 0x0000, + 0x07ff, 0x0c00, 0x0800, 0x0200, 0xf800, 0xf300, 0x0400, 0x0000, 0x07ff, 0x0c00, + 0x0800, 0x0200, 0xf800, 0xf300, 0x0400, 0x0000, 0x07ff, 0x0c00, 0x0800, 0x0200, + 0xf800, 0xf300, 0x0400, 0x0000, 0x07ff, 0x0c00, 0x0800, 0x0200, 0xf800, 0xf300, + 0x0400, 0x0000, 0x07ff, 0x0c00, 0x0800, 0x0200, 0xf800, 0xf300, 0x0400, 0x0000, + 0x07ff, 0x0c00, 0x0800, 0x0200, 0xf800, 0xf300, 0x0400, 0x0000, 0x07ff, 0x0c00, + 0x0800, 0x0200, 0xf800, 0xf300, 0x0400, 0x0000, 0x07ff, 0x0c00, 0x0800, 0x0200, + 0xf800, 0xf300, 0x0400, 0x0000, 0x07ff, 0x0c00, 0x0800, 0x0200, 0xf800, 0xf300, + 0x0400, 0x0000, 0x07ff, 0x0c00, 0x0800, 0x0200, 0xf800, 0xf300, 0x0400, 0x0000, + 0x07ff, 0x0c00, 0x0800, 0x0200, 0xf800, 0xf300, 0x0400, 0x0000, 0x07ff, 0x0c00, + 0x0800, 0x0200, 0xf800, 0xf300, 0x0400, 0x0000, 0x07ff, 0x0c00, 0x0800, 0x0200, + 0xf800, 0xf300, 0x0400, 0x0000, 0x07ff, 0x0c00, 0x0800, 0x0200, 0xf800, 0xf300, + 0x0400, 0x0000, 0x07ff, 0x0c00, 0x0800, 0x0200, 0xf800, 0xf300, 0x0400, 0x0000, + 0x07ff, 0x0c00, 0x0800, 0x0200, 0xf800, 0xf300, 0x0400, 0x0000, 0x07ff, 0x0c00, + 0x0800, 0x0200, 0xf800, 0xf300, 0x0400, 0x0000, 0x07ff, 0x0c00, 0x0800, 0x0200, + 0xf800, 0xf300, 0x0400, 0x0000, 0x07ff, 0x0c00, 0x0800, 0x0200, 0xf800, 0xf300, 0x0400 + }; + short b[200] = + {0x0c60, 0x0c40, 0x0c20, 0x0c00, 0xf600, 0xf400, 0xf200, 0xf000, 0x0c60, + 0x0c40, 0x0c20, 0x0c00, 0xf600, 0xf400, 0xf200, 0xf000, 0x0c60, 0x0c40, 0x0c20, + 0x0c00, 
0xf600, 0xf400, 0xf200, 0xf000, 0x0c60, 0x0c40, 0x0c20, 0x0c00, 0xf600, + 0xf400, 0xf200, 0xf000, 0x0c60, 0x0c40, 0x0c20, 0x0c00, 0xf600, 0xf400, 0xf200, + 0xf000, 0x0c60, 0x0c40, 0x0c20, 0x0c00, 0xf600, 0xf400, 0xf200, 0xf000, 0x0c60, + 0x0c40, 0x0c20, 0x0c00, 0xf600, 0xf400, 0xf200, 0xf000, 0x0c60, 0x0c40, 0x0c20, + 0x0c00, 0xf600, 0xf400, 0xf200, 0xf000, 0x0c60, 0x0c40, 0x0c20, 0x0c00, 0xf600, + 0xf400, 0xf200, 0xf000, 0x0c60, 0x0c40, 0x0c20, 0x0c00, 0xf600, 0xf400, 0xf200, + 0xf000, 0x0c60, 0x0c40, 0x0c20, 0x0c00, 0xf600, 0xf400, 0xf200, 0xf000, 0x0c60, + 0x0c40, 0x0c20, 0x0c00, 0xf600, 0xf400, 0xf200, 0xf000, 0x0c60, 0x0c40, 0x0c20, + 0x0c00, 0xf600, 0xf400, 0xf200, 0xf000, 0x0c60, 0x0c40, 0x0c20, 0x0c00, 0xf600, + 0xf400, 0xf200, 0xf000, 0x0c60, 0x0c40, 0x0c20, 0x0c00, 0xf600, 0xf400, 0xf200, + 0xf000, 0x0c60, 0x0c40, 0x0c20, 0x0c00, 0xf600, 0xf400, 0xf200, 0xf000, 0x0c60, + 0x0c40, 0x0c20, 0x0c00, 0xf600, 0xf400, 0xf200, 0xf000, 0x0c60, 0x0c40, 0x0c20, + 0x0c00, 0xf600, 0xf400, 0xf200, 0xf000, 0x0c60, 0x0c40, 0x0c20, 0x0c00, 0xf600, + 0xf400, 0xf200, 0xf000, 0x0c60, 0x0c40, 0x0c20, 0x0c00, 0xf600, 0xf400, 0xf200, + 0xf000, 0x0c60, 0x0c40, 0x0c20, 0x0c00, 0xf600, 0xf400, 0xf200, 0xf000, 0x0c60, + 0x0c40, 0x0c20, 0x0c00, 0xf600, 0xf400, 0xf200, 0xf000, 0x0c60, 0x0c40, 0x0c20, + 0x0c00, 0xf600, 0xf400, 0xf200, 0xf000, 0x0c60, 0x0c40, 0x0c20, 0x0c00, 0xf600, + 0xf400, 0xf200, 0xf000, 0x0c60, 0x0c40, 0x0c20, 0x0c00, 0xf600, 0xf400, 0xf200, 0xf000 + }; + short c = 0x3; + long int output[200]; + long int d = 0xAAAA; + int e[1] = {0xEEEE}; + /* + * Declared as memory variable so it doesn't get optimized out + */ + + vec_mpy1(a, b, c); + c = mac(a, b, (long int) c, (long int *) output); + fir(a, b, output); + fir_no_red_ld(a, b, output); + d = latsynth(a, b, N, d); + iir1(a, b, &output[100], output); + e[0] = codebook(d, 1, 17, e[0], d, a, c, 1); + jpegdct(a, b); + return 0; +} diff --git a/test/src/expint.c b/test/src/expint.c new file mode 100755 index 0000000..1be61ff --- /dev/null +++ b/test/src/expint.c @@ -0,0 +1,156 @@ +/* $Id: expint.c,v 1.2 2005/04/04 11:34:58 csg Exp $ */ + +/************************************************************************ + * FROM: + * http://sron9907.sron.nl/manual/numrecip/c/expint.c + * + * FEATURE: + * One loop depends on a loop-invariant value to determine + * if it run or not. + * + ***********************************************************************/ + + +long int expint(int n, long int x); + +void main(void) +{ + expint(50,1); + // with expint(50,21) as argument, runs the short path + // in expint. expint(50,1) gives the longest execution time +} + +long int foo(long int x) +{ + return x*x+(8*x)<<4-x; +} + + +/* Function with same flow, different data types, + nonsensical calculations */ +long int expint(int n, long int x) +{ + int i,ii,nm1; + long int a,b,c,d,del,fact,h,psi,ans; + + nm1=n-1; /* arg=50 --> 49 */ + + if(x>1) /* take this leg? */ + { + b=x+n; + c=2e6; + d=3e7; + h=d; + + for (i=1;i<=100;i++) /* MAXIT is 100 */ + { + a = -i*(nm1+i); + b += 2; + d=10*(a*d+b); + c=b+a/c; + del=c*d; + h *= del; + if (del < 10000) + { + ans=h*-x; + return ans; + } + } + } + else /* or this leg? */ + { + // For the current argument, will always take + // '2' path here: + ans = nm1 != 0 ? 
2 : 1000; + fact=1; + for (i=1;i<=100;i++) /* MAXIT */ + { + fact *= -x/i; + if (i != nm1) /* depends on parameter n */ + del = -fact/(i-nm1); + else /* this fat piece only runs ONCE */ + { /* runs on iter 49 */ + psi = 0x00FF; + for (ii=1;ii<=nm1;ii++) /* */ + psi += ii + nm1; + del=psi+fact*foo(x); + } + ans += del; + /* conditional leave removed */ + } + + } + return ans; +} + + + + + +/* #define MAXIT 100 */ +/* #define EULER 0.5772156649 */ +/* #define FPMIN 1.0e-30 */ +/* #define EPS 1.0e-7 */ +/* float expint(int n, float x) */ +/* { */ +/* void nrerror(char error_text[]); */ +/* int i,ii,nm1; */ +/* float a,b,c,d,del,fact,h,psi,ans; */ + +/* nm1=n-1; */ +/* if (n < 0 || x < 0.0 || (x==0.0 && (n==0 || n==1))) */ +/* nrerror("bad arguments in expint"); */ +/* else { */ +/* if (n == 0) */ +/* ans=exp(-x)/x; */ +/* else */ +/* { */ +/* if (x == 0.0) */ +/* ans=1.0/nm1; */ +/* else */ +/* { */ +/* if (x > 1.0) { */ +/* b=x+n; */ +/* c=1.0/FPMIN; */ +/* d=1.0/b; */ +/* h=d; */ + +/* for (i=1;i<=MAXIT;i++) */ +/* { */ +/* a = -i*(nm1+i); */ +/* b += 2.0; */ +/* d=1.0/(a*d+b); */ +/* c=b+a/c; */ +/* del=c*d; */ +/* h *= del; */ +/* if (fabs(del-1.0) < EPS) */ +/* { */ +/* ans=h*exp(-x); */ +/* return ans; */ +/* } */ +/* } */ + +/* nrerror("continued fraction failed in expint");*/ +/* } */ +/* else */ +/* { */ +/* ans = (nm1!=0 ? 1.0/nm1 : -log(x)-EULER); */ +/* fact=1.0; */ +/* for (i=1;i<=MAXIT;i++) { */ +/* fact *= -x/i; */ +/* if (i != nm1) del = -fact/(i-nm1); */ +/* else { */ +/* psi = -EULER; */ +/* for (ii=1;ii<=nm1;ii++) psi += 1.0/ii; */ +/* del=fact*(-log(x)+psi); */ +/* } */ +/* ans += del; */ +/* if (fabs(del) < fabs(ans)*EPS) return ans; */ +/* } */ +/* nrerror("series failed in expint"); */ +/* } */ +/* } */ +/* } */ +/* } */ +/* return ans; */ +/* } */ diff --git a/test/src/fdct.c b/test/src/fdct.c new file mode 100755 index 0000000..c24d026 --- /dev/null +++ b/test/src/fdct.c @@ -0,0 +1,244 @@ +/* MDH WCET BENCHMARK SUITE. */ +/* 2012/09/28, Jan Gustafsson + * Changes: + * - main() declared as int. + * - Unused variables removed. 
+ */ +// ********************************************************************************************************* +// * FDCT.C * +// * * +// * Forward Discrete Cosine Transform * +// * Used on 8x8 image blocks * +// * to reassemble blocks in order to ease quantization compressing image information on the more * +// * significant frequency components * +// * * +// * Expected Result -> short int block[64]= { 699,164,-51,-16, 31,-15,-19, 8, * +// * 71, 14,-61, -2,-11,-12, 7, 12, * +// * -58,-55, 13, 28,-20, -7, 14,-18, * +// * 29, 22, 3, 3,-11, 7, 11,-22, * +// * -1,-28,-27, 10, 0, -7, 11, 6, * +// * 7, 6, 21, 21,-10, -8, 2,-14, * +// * 1, -7,-15,-15,-10, 15, 16,-10, * +// * 0, -1, 0, 15, 4,-13, -5, 4 }; * +// * * +// * Exadecimal results: Block -> 02bb00a4 ffcdfff0 001ffff1 ffed0008 0047000e ffc3fffe 000bfff4 0007000c * +// * ffc6ffc9 000d001c ffecfff9 000effee 001d0016 00030003 fff50007 000bffea * +// * ffffffe4 ffe5000a 0000fff9 000b0006 00070006 00150015 fff6fff8 0002fff2 * +// * 0001fff9 fff1fff1 fff6000f 0010fff6 0000ffff 0000000f 0004fff3 fffb0004 * +// * * +// * Number of clock cycles (with these inputs) -> 2132 * +// ********************************************************************************************************* + +#ifdef IO +#include "libp.c" +#include "arith.c" +#include "string.c" +#endif + +// Cosine Transform Coefficients + +#define W1 2841 /* 2048*sqrt(2)*cos(1*pi/16) */ +#define W2 2676 /* 2048*sqrt(2)*cos(2*pi/16) */ +#define W3 2408 /* 2048*sqrt(2)*cos(3*pi/16) */ +#define W5 1609 /* 2048*sqrt(2)*cos(5*pi/16) */ +#define W6 1108 /* 2048*sqrt(2)*cos(6*pi/16) */ +#define W7 565 /* 2048*sqrt(2)*cos(7*pi/16) */ + +// Other FDCT Parameters +#define CONST_BITS 13 +#define PASS1_BITS 2 + +int out; + +// Image block to be transformed: +short int block[64]= +{ 99 ,104 ,109 ,113 ,115 ,115 , 55 , 55, + 104 ,111 ,113 ,116 ,119 , 56 , 56 , 56, + 110 ,115 ,120 ,119 ,118 , 56 , 56 , 56, + 119 ,121 ,122 ,120 ,120 , 59 , 59 , 59, + 119 ,120 ,121 ,122 ,122 , 55 , 55 , 55, + 121 ,121 ,121 ,121 , 60 , 57 , 57 , 57, + 122 ,122 , 61 , 63 , 62 , 57 , 57 , 57, + 62 , 62 , 61 , 61 , 63 , 58 , 58 , 58, +}; + +/* Fast Discrete Cosine Transform */ + +void fdct(short int *blk, int lx) +{ + int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + int tmp10, tmp11, tmp12, tmp13; + int z1, z2, z3, z4, z5; + int i; + short int *block; + + int constant; + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT; */ + /* furthermore, we scale the results by 2**PASS1_BITS. */ + + block=blk; + + for (i=0; i<8; i++) + { + tmp0 = block[0] + block[7]; + tmp7 = block[0] - block[7]; + tmp1 = block[1] + block[6]; + tmp6 = block[1] - block[6]; + tmp2 = block[2] + block[5]; + tmp5 = block[2] - block[5]; + tmp3 = block[3] + block[4]; + tmp4 = block[3] - block[4]; + + /* Even part per LL&M figure 1 --- note that published figure is faulty; + * rotator "sqrt(2)*c1" should be "sqrt(2)*c6". + */ + + tmp10 = tmp0 + tmp3; + tmp13 = tmp0 - tmp3; + tmp11 = tmp1 + tmp2; + tmp12 = tmp1 - tmp2; + + block[0] = ((tmp10+tmp11) << PASS1_BITS); + block[4] = ((tmp10-tmp11) << PASS1_BITS); + + constant= 4433; + z1 = (tmp12 + tmp13) * constant; + constant= 6270; + block[2] = (z1 + (tmp13 * constant)) >> (CONST_BITS-PASS1_BITS); + constant= -15137; + block[6] = (z1 + (tmp12 * constant)) >> (CONST_BITS-PASS1_BITS); + + /* Odd part per figure 8 --- note paper omits factor of sqrt(2). + * cK represents cos(K*pi/16). + * i0..i3 in the paper are tmp4..tmp7 here. 
+ */ + + z1 = tmp4 + tmp7; + z2 = tmp5 + tmp6; + z3 = tmp4 + tmp6; + z4 = tmp5 + tmp7; + constant= 9633; + z5 = ((z3 + z4) * constant); /* sqrt(2) * c3 */ + + constant= 2446; + tmp4 = (tmp4 * constant); /* sqrt(2) * (-c1+c3+c5-c7) */ + constant= 16819; + tmp5 = (tmp5 * constant); /* sqrt(2) * ( c1+c3-c5+c7) */ + constant= 25172; + tmp6 = (tmp6 * constant); /* sqrt(2) * ( c1+c3+c5-c7) */ + constant= 12299; + tmp7 = (tmp7 * constant); /* sqrt(2) * ( c1+c3-c5-c7) */ + constant= -7373; + z1 = (z1 * constant); /* sqrt(2) * (c7-c3) */ + constant= -20995; + z2 = (z2 * constant); /* sqrt(2) * (-c1-c3) */ + constant= -16069; + z3 = (z3 * constant); /* sqrt(2) * (-c3-c5) */ + constant= -3196; + z4 = (z4 * constant); /* sqrt(2) * (c5-c3) */ + + z3 += z5; + z4 += z5; + + block[7] = (tmp4 + z1 + z3) >> (CONST_BITS-PASS1_BITS); + block[5] = (tmp5 + z2 + z4) >> (CONST_BITS-PASS1_BITS); + block[3] = (tmp6 + z2 + z3) >> (CONST_BITS-PASS1_BITS); + block[1] = (tmp7 + z1 + z4) >> (CONST_BITS-PASS1_BITS); + + + + /* advance to next row */ + block += lx; + + } + + /* Pass 2: process columns. */ + + block=blk; + + for (i = 0; i<8; i++) + { + tmp0 = block[0] + block[7*lx]; + tmp7 = block[0] - block[7*lx]; + tmp1 = block[lx] + block[6*lx]; + tmp6 = block[lx]- block[6*lx]; + tmp2 = block[2*lx] + block[5*lx]; + tmp5 = block[2*lx] - block[5*lx]; + tmp3 = block[3*lx] + block[4*lx]; + tmp4 = block[3*lx] - block[4*lx]; + + /* Even part per LL&M figure 1 --- note that published figure is faulty; + * rotator "sqrt(2)*c1" should be "sqrt(2)*c6". + */ + + tmp10 = tmp0 + tmp3; + tmp13 = tmp0 - tmp3; + tmp11 = tmp1 + tmp2; + tmp12 = tmp1 - tmp2; + + block[0] = (tmp10 + tmp11) >> (PASS1_BITS+3); + block[4*lx] = (tmp10 - tmp11) >> (PASS1_BITS+3); + + constant = 4433; + z1 = ((tmp12 + tmp13) * constant); + constant= 6270; + block[2*lx] = (z1 + (tmp13 * constant)) >> (CONST_BITS+PASS1_BITS+3); + constant=-15137; + block[6*lx] = (z1 + (tmp12 * constant)) >> (CONST_BITS+PASS1_BITS+3); + + /* Odd part per figure 8 --- note paper omits factor of sqrt(2). + * cK represents cos(K*pi/16). + * i0..i3 in the paper are tmp4..tmp7 here. + */ + + z1 = tmp4 + tmp7; + z2 = tmp5 + tmp6; + z3 = tmp4 + tmp6; + z4 = tmp5 + tmp7; + constant=9633; + z5 = ((z3 + z4) * constant); /* sqrt(2) * c3 */ + + constant=2446; + tmp4 = (tmp4 * constant); /* sqrt(2) * (-c1+c3+c5-c7) */ + constant=16819; + tmp5 = (tmp5 * constant); /* sqrt(2) * ( c1+c3-c5+c7) */ + constant=25172; + tmp6 = (tmp6 * constant); /* sqrt(2) * ( c1+c3+c5-c7) */ + constant=12299; + tmp7 = (tmp7 * constant); /* sqrt(2) * ( c1+c3-c5-c7) */ + constant=-7373; + z1 = (z1 * constant); /* sqrt(2) * (c7-c3) */ + constant= -20995; + z2 = (z2 * constant); /* sqrt(2) * (-c1-c3) */ + constant=-16069; + z3 = (z3 * constant); /* sqrt(2) * (-c3-c5) */ + constant=-3196; + z4 = (z4 * constant); /* sqrt(2) * (c5-c3) */ + + z3 += z5; + z4 += z5; + + block[7*lx] = (tmp4 + z1 + z3) >> (CONST_BITS+PASS1_BITS+3); + block[5*lx] = (tmp5 + z2 + z4) >> (CONST_BITS+PASS1_BITS+3); + block[3*lx] = (tmp6 + z2 + z3) >> (CONST_BITS+PASS1_BITS+3); + block[lx] = (tmp7 + z1 + z4) >> (CONST_BITS+PASS1_BITS+3); + + /* advance to next column */ + block++; + } +} + +int main(void) +{ +/* int i; */ + + fdct (block, 8); // 8x8 Blocks, DC precision value = 0, Quantization coefficient (mquant) = 64 + + #ifdef IO + for(i=0;i<64;i+=2) printf("block[%2d] -> %8d . 
block[%2d] -> %8d\n",i,block[i],i+1,block[i+1]); + #endif + + return block[0]; +} diff --git a/test/src/fft1.c b/test/src/fft1.c new file mode 100755 index 0000000..1ddcd1b --- /dev/null +++ b/test/src/fft1.c @@ -0,0 +1,218 @@ +/*************************************************************************/ +/* */ +/* SNU-RT Benchmark Suite for Worst Case Timing Analysis */ +/* ===================================================== */ +/* Collected and Modified by S.-S. Lim */ +/* sslim@archi.snu.ac.kr */ +/* Real-Time Research Group */ +/* Seoul National University */ +/* */ +/* */ +/* < Features > - restrictions for our experimental environment */ +/* */ +/* 1. Completely structured. */ +/* - There are no unconditional jumps. */ +/* - There are no exit from loop bodies. */ +/* (There are no 'break' or 'return' in loop bodies) */ +/* 2. No 'switch' statements. */ +/* 3. No 'do..while' statements. */ +/* 4. Expressions are restricted. */ +/* - There are no multiple expressions joined by 'or', */ +/* 'and' operations. */ +/* 5. No library calls. */ +/* - All the functions needed are implemented in the */ +/* source file. */ +/* */ +/* */ +/*************************************************************************/ +/* */ +/* FILE: fft1.c */ +/* SOURCE : Turbo C Programming for Engineering by Hyun Soon Ahn */ +/* */ +/* DESCRIPTION : */ +/* */ +/* FFT using Cooly-Turkey algorithm. */ +/* There are two inputs, ar[] and ai[]. ar[] is real number parts */ +/* of input array and the ai[] is imaginary number parts of input. */ +/* The function fft1 process FFT or inverse FFT according to the .*/ +/* parameter flag. (FFT with flag=0, inverse FFT with flag=1). */ +/* */ +/* */ +/* REMARK : */ +/* */ +/* EXECUTION TIME : */ +/* */ +/* */ +/*************************************************************************/ + + +#define PI 3.14159 +#define M_PI 3.14159 + +double ar[8]; +double ai[8] = {0., }; + +int fft1(int n, int flag); + + +static double fabs(double n) +{ + double f; + + if (n >= 0) f = n; + else f = -n; + return f; +} + +static double log(double n) +{ + return(4.5); +} + + +static double sin(rad) +double rad; +{ + double app; + + double diff; + int inc = 1; + + while (rad > 2*PI) + rad -= 2*PI; + while (rad < -2*PI) + rad += 2*PI; + app = diff = rad; + diff = (diff * (-(rad*rad))) / + ((2.0 * inc) * (2.0 * inc + 1.0)); + app = app + diff; + inc++; + while(fabs(diff) >= 0.00001) { + diff = (diff * (-(rad*rad))) / + ((2.0 * inc) * (2.0 * inc + 1.0)); + app = app + diff; + inc++; + } + + return(app); +} + + +static double cos(double rad) +{ + double sin(); + + return (sin (PI / 2.0 - rad)); +} + + +void main() +{ + + int i, n = 8, flag, chkerr; + + + /* ar */ + for(i = 0; i < n; i++) + ar[i] = cos(2*M_PI*i/n); + + /* forward fft */ + flag = 0; + chkerr = fft1(n, flag); + + /* inverse fft */ + flag = 1; + chkerr = fft1(n, flag); + +} + + + +int fft1(int n, int flag) +{ + + int i, j, k, it, xp, xp2, j1, j2, iter; + double sign, w, wr, wi, dr1, dr2, di1, di2, tr, ti, arg; + + if(n < 2) return(999); + iter = log((double)n)/log(2.0); + j = 1; +#ifdef DEBUG + printf("iter=%d\n",iter); +#endif + for(i = 0; i < iter; i++) + j *= 2; + if(fabs(n-j) > 1.0e-6) + return(1); + + /* Main FFT Loops */ + sign = ((flag == 1) ? 
1.0 : -1.0); + xp2 = n; + for(it = 0; it < iter; it++) + { + xp = xp2; + xp2 /= 2; + w = PI / xp2; +#ifdef DEBUG + printf("xp2=%d\n",xp2); +#endif + for(k = 0; k < xp2; k++) + { + arg = k * w; + wr = cos(arg); + wi = sign * sin(arg); + i = k - xp; + for(j = xp; j <= n; j += xp) + { + j1 = j + i; + j2 = j1 + xp2; + dr1 = ar[j1]; + dr2 = ar[j2]; + di1 = ai[j1]; + di2 = ai[j2]; + tr = dr1 - dr2; + ti = di1 - di2; + ar[j1] = dr1 + dr2; + ai[j1] = di1 + di2; + ar[j2] = tr * wr - ti * wi; + ai[j2] = ti * wr + tr * wi; + } + } + } + + /* Digit Reverse Counter */ + + j1 = n / 2; + j2 = n - 1; + j = 1; +#ifdef DEBUG + printf("j2=%d\n",j2); +#endif + for(i = 1; i <= j2; i++) + { + if(i < j) + { + tr = ar[j-1]; + ti = ai[j-1]; + ar[j-1] = ar[i-1]; + ai[j-1] = ai[i-1]; + ar[i-1] = tr; + ai[i-1] = ti; + } + k = j1; + while(k < j) + { + j -= k; + k /= 2; + } + j += k; + } + if(flag == 0) return(0); + w = n; + for(i = 0; i < n; i++) + { + ar[i] /= w; + ai[i] /= w; + } + return(0); +} diff --git a/test/src/fibcall.c b/test/src/fibcall.c new file mode 100755 index 0000000..3719249 --- /dev/null +++ b/test/src/fibcall.c @@ -0,0 +1,74 @@ +/* $Id: fibcall.c,v 1.2 2005/04/04 11:34:58 csg Exp $ */ + +/*************************************************************************/ +/* */ +/* SNU-RT Benchmark Suite for Worst Case Timing Analysis */ +/* ===================================================== */ +/* Collected and Modified by S.-S. Lim */ +/* sslim@archi.snu.ac.kr */ +/* Real-Time Research Group */ +/* Seoul National University */ +/* */ +/* */ +/* < Features > - restrictions for our experimental environment */ +/* */ +/* 1. Completely structured. */ +/* - There are no unconditional jumps. */ +/* - There are no exit from loop bodies. */ +/* (There are no 'break' or 'return' in loop bodies) */ +/* 2. No 'switch' statements. */ +/* 3. No 'do..while' statements. */ +/* 4. Expressions are restricted. */ +/* - There are no multiple expressions joined by 'or', */ +/* 'and' operations. */ +/* 5. No library calls. */ +/* - All the functions needed are implemented in the */ +/* source file. */ +/* */ +/* */ +/*************************************************************************/ +/* */ +/* FILE: fibcall.c */ +/* SOURCE : Public Domain Code */ +/* */ +/* DESCRIPTION : */ +/* */ +/* Summing the Fibonacci series. */ +/* */ +/* REMARK : */ +/* */ +/* EXECUTION TIME : */ +/* */ +/* */ +/*************************************************************************/ + + + +int fib(int n) +{ + int i, Fnew, Fold, temp,ans; + + Fnew = 1; Fold = 0; + for ( i = 2; + i <= 30 && i <= n; /* apsim_loop 1 0 */ + i++ ) + { + temp = Fnew; + Fnew = Fnew + Fold; + Fold = temp; + } + ans = Fnew; + return ans; +} + +int main() +{ + int a; + + a = 30; + fib(a); + return a; +} + + + diff --git a/test/src/fir.c b/test/src/fir.c new file mode 100755 index 0000000..61de052 --- /dev/null +++ b/test/src/fir.c @@ -0,0 +1,299 @@ +/* MDH WCET BENCHMARK SUITE. */ + +/* 2012/09/28, Jan Gustafsson + * Changes: + * - Adam Betts wrote: I'm currently looking at the fir benchmark and noticed something + * peculiar. The "in_data" array has 701 elements (effectively 700 as the + * last elements acts as a sentinel value) and "fir_filter_int" requires + * the length of the input/output arrays (passed in the " in_len" + * argument). So I was expecting 700 to be passed as this parameter. Indeed it was, + * but it has since been changed. 
The code now contains the following 2 + * lines: + * + * //  fir_filter_int(in_data,output,700,fir_int,35,285); Changed JG/Ebbe + * fir_filter_int(in_data,output,10,fir_int,35,285); + * + * Can you explain why the change? + * + *** + * + * Jan G wrote: Since neither Ebbe nor I can explain the change, I propose to change it back. + * => + * Action: Fixed. + */ + + +/* Execute the integer fir filter from "C Algorithms for DSP". + * + * Adapted for WCET benchmarking by IJAE May 2000. + * + * Features: nested variable-length loops. + * if-statement with branches taken a known number of times + * + * Added explicit sizes of the arrays to work-around a bug in the + * NIC-compiler. Christer Sandberg + */ +/* #define LOOPS 720 */ +#define LOOPS 1 + + +/*-------------------------------------------------- + *---- INPUT DATA FOR TESTING + *--------------------------------------------------*/ +long fir_int[36]={ +0xfffffffe, 0x1, 0x4, 0x3, 0xfffffffe, 0xfffffffc, 0x2, 0x7, 0x0, +0xfffffff7, 0xfffffffc, 0xc, 0xb, 0xfffffff2, 0xffffffe6, 0xf, 0x59, 0x7f, +0x59, 0xf, 0xffffffe6, 0xfffffff2, 0xb, 0xc, 0xfffffffc, 0xfffffff7, 0x0, +0x7, 0x2, 0xfffffffc, 0xfffffffe, 0x3, 0x4, 0x1, 0xfffffffe, 0}; + +long in_data[701]={ +0x0, 0x0, 0x0, 0x0, 0x7f, 0x79, 0x72, 0x79, 0xd, 0xd, +0x0, 0x3, 0x5, 0x2, 0x3, 0x7f, 0x7f, 0x2, 0x7e, 0x0, +0x1, 0x7e, 0x1, 0x1, 0x7f, 0x0, 0x7f, 0x0, 0x2, 0x1, +0x1, 0x3, 0x1, 0x7f, 0x1, 0x0, 0x1, 0x1, 0x7d, 0x7b, +0x73, 0x6a, 0x77, 0x10, 0xe, 0x1, 0x5, 0x5, 0x5, 0x5, +0x7d, 0x0, 0x2, 0x7d, 0x0, 0x0, 0x7e, 0x1, 0x7e, 0x7f, +0x3, 0x7c, 0x7e, 0x6, 0x0, 0x7e, 0x3, 0x2, 0x7f, 0x7e, +0x7f, 0x2, 0x1, 0x7f, 0x1, 0x1, 0x0, 0x3, 0x0, 0x7f, +0x2, 0x0, 0x7f, 0x3, 0x1, 0x0, 0x0, 0x7d, 0x0, 0x3, +0x0, 0x7e, 0x7f, 0x2, 0x1, 0x7e, 0x0, 0x3, 0x7f, 0x7d, +0x1, 0x1, 0x1, 0x7f, 0x0, 0x5, 0x0, 0x7f, 0x2, 0x7e, +0x7f, 0x2, 0x1, 0x0, 0x7e, 0x0, 0x5, 0x0, 0x7f, 0x0, +0x7e, 0x1, 0x0, 0x7d, 0x1, 0x3, 0x7f, 0x0, 0x0, 0x7e, +0x2, 0x3, 0x7e, 0x7d, 0x72, 0x68, 0x71, 0x5, 0xc, 0x7, +0x2, 0x6, 0xd, 0x5, 0x7d, 0x3, 0x2, 0x7f, 0x0, 0x79, +0x7a, 0x3, 0x7e, 0x7d, 0x0, 0x7d, 0x2, 0x1, 0x7d, 0x8, +0x3, 0x7c, 0x6, 0x0, 0x7a, 0x6, 0x2, 0x7c, 0x3, 0x7e, +0x79, 0x6, 0x5, 0x74, 0x7f, 0xd, 0x7a, 0x78, 0x6, 0x5, +0x1, 0x0, 0x7d, 0x1, 0x4, 0x7c, 0x7f, 0x3, 0x7f, 0x5, +0x3, 0x7a, 0x6, 0xa, 0x76, 0x7c, 0xa, 0x7c, 0x7f, 0x6, +0x79, 0x3, 0xc, 0x75, 0x78, 0xa, 0x0, 0x79, 0x3, 0x7e, +0x7c, 0x6, 0x0, 0x79, 0x2, 0x7e, 0x7f, 0x6, 0x76, 0x7f, +0xd, 0x79, 0x7f, 0x6, 0x79, 0x6, 0x3, 0x71, 0x6, 0xa, +0x73, 0x7f, 0xa, 0x0, 0x7f, 0x7a, 0x7c, 0xa, 0x0, 0x75, +0x7f, 0xc, 0xa, 0x7c, 0x79, 0x9, 0xd, 0x7d, 0x7a, 0x5, +0xb, 0xa, 0x79, 0x7c, 0x16, 0x3, 0x72, 0xd, 0x7, 0x79, +0xc, 0x7, 0x7a, 0xb, 0x7, 0x7a, 0xa, 0x7, 0x79, 0xa, +0x5, 0x75, 0x6, 0x5, 0x79, 0x5, 0x6, 0x1, 0x6, 0x0, +0x7a, 0x2, 0x7, 0x3, 0x7d, 0x1, 0xa, 0x7, 0x2, 0x7f, +0x7f, 0x9, 0x7, 0x79, 0x79, 0x6, 0x8, 0x7d, 0x7a, 0x6, +0xc, 0x6, 0x7d, 0x7f, 0xd, 0x7, 0x79, 0x1, 0x6, 0x7f, +0x7f, 0x2, 0x3, 0x1, 0x7e, 0x1, 0x1, 0x7d, 0x1, 0x0, +0x7d, 0x6, 0x3, 0x7d, 0x5, 0x7, 0x7f, 0x7c, 0x1, 0x6, +0x6, 0x7c, 0x7a, 0x7, 0xa, 0x0, 0x78, 0x1, 0x8, 0x0, +0x79, 0x7a, 0x4, 0xa, 0x0, 0x78, 0x1, 0x6, 0x7a, 0x75, +0x7a, 0x0, 0x0, 0x79, 0x76, 0x7f, 0x7, 0x0, 0x7a, 0x7d, +0x2, 0x4, 0x7c, 0x7a, 0x2, 0x5, 0x7c, 0x7a, 0x7d, 0x7f, +0x0, 0x78, 0x75, 0x7f, 0x0, 0x79, 0x78, 0x79, 0x1, 0x3, +0x79, 0x79, 0x0, 0x0, 0x7f, 0x7f, 0x79, 0x7f, 0x2, 0x7a, +0x7c, 0x7d, 0x7c, 0x7f, 0x7d, 0x79, 0x7d, 0x0, 0x79, 0x7a, +0x7c, 0x7d, 0x0, 0x7d, 0x7d, 0x0, 0x0, 0x0, 0x0, 0x7d, +0x7d, 0x0, 0x7d, 0x7e, 0x0, 0x7e, 0x3, 0x3, 0x7d, 0x1, +0x5, 
0x0, 0x7e, 0x7d, 0x7f, 0x3, 0x7d, 0x79, 0x1, 0x2, +0x7d, 0x7f, 0x1, 0x0, 0x0, 0x7f, 0x7f, 0x7e, 0x7f, 0x0, +0x7f, 0x0, 0x7c, 0x7d, 0x0, 0x79, 0x78, 0x7c, 0x7c, 0x7b, +0x7b, 0x7d, 0x7f, 0x0, 0x0, 0x7f, 0x0, 0x1, 0x2, 0x0, +0x7f, 0x0, 0x0, 0x0, 0x7f, 0x7e, 0x0, 0x0, 0x7f, 0x0, +0x2, 0x1, 0x2, 0x6, 0x5, 0x3, 0x6, 0x8, 0x5, 0x2, +0x1, 0x1, 0x3, 0x0, 0x7d, 0x7f, 0x0, 0x7f, 0x7e, 0x0, +0x2, 0x3, 0x2, 0x1, 0x2, 0x3, 0x1, 0x7c, 0x7d, 0x0, +0x0, 0x7e, 0x7c, 0x7f, 0x1, 0x0, 0x7e, 0x7c, 0x7f, 0x1, +0x0, 0x7e, 0x7f, 0x2, 0x3, 0x1, 0x0, 0x4, 0x6, 0x5, +0x6, 0x7, 0xa, 0xa, 0x4, 0x2, 0x5, 0x8, 0x9, 0x8, +0x7, 0xc, 0x14, 0x14, 0x10, 0xe, 0x14, 0x15, 0xf, 0x9, +0x7, 0x4, 0x7e, 0x76, 0x64, 0x41, 0x48, 0x7d, 0x6c, 0x3d, +0x67, 0x10, 0x6, 0x7d, 0x75, 0x7, 0x1d, 0x0, 0x6c, 0x2, +0x7d, 0x78, 0x77, 0x6f, 0x77, 0x1, 0x0, 0x2, 0x7, 0xa, +0x1c, 0x1c, 0x17, 0x23, 0x2f, 0x41, 0x43, 0x4f, 0x55, 0x58, +0x7e, 0x2, 0x4c, 0x10, 0x69, 0x2c, 0xd, 0x74, 0x2a, 0x74, +0x63, 0x29, 0x7c, 0x5e, 0x21, 0x35, 0x46, 0x24, 0x67, 0x35, +0x3c, 0x3c, 0x26, 0x26, 0x2f, 0x47, 0x64, 0x4, 0x13, 0x18, +0x27, 0x2b, 0x30, 0x1b, 0x7f, 0x78, 0x72, 0x68, 0x5c, 0x5a, +0x68, 0x7c, 0x3, 0xd, 0x26, 0x41, 0x51, 0x5a, 0x6a, 0x6c, +0x54, 0x78, 0x9, 0x45, 0x79, 0x1f, 0xb, 0x2e, 0x60, 0xb, +0x66, 0x7f, 0x68, 0x77, 0x4e, 0x46, 0x4a, 0x3b, 0x12, 0x5b, +0x37, 0x31, 0x21, 0xb, 0x12, 0x2e, 0x57, 0x7e, 0x19, 0x22, +0x2b, 0x3f, 0x3a, 0x25, 0xb, 0x79, 0x71, 0x68, 0x61, 0x5c, +0x66, 0x72, 0x6, 0x16, 0x29, 0x41, 0x5e, 0x6d, 0x66, 0x60, +0x6e, 0x17, 0x48, 0x36, 0x12, 0x17, 0x2f, 0x63, 0x78, 0x5c, +0x77, 0x6c, 0x75, 0x41, 0x49, 0x4f, 0x3b, 0xb, 0x54, 0x37, +0 }; + +long out_data[720]={ +0x3, 0xfffffffa, 0xfffffffd, 0x1d, 0x58, 0x89, 0x87, 0x56, 0x20, 0x7, +0x7, 0x4, 0xfffffff9, 0x0, 0x28, 0x5b, 0x6b, 0x4f, 0x2b, 0x21, +0x2d, 0x30, 0x27, 0x27, 0x37, 0x47, 0x42, 0x27, 0x8, 0xfffffff4, +0xfffffff5, 0xd, 0x2e, 0x3b, 0x25, 0x0, 0xfffffff8, 0x1d, 0x59, 0x83, +0x87, 0x6f, 0x4e, 0x2f, 0x12, 0xffffffff, 0xfffffffb, 0x4, 0x15, 0x23, +0x2d, 0x31, 0x2f, 0x29, 0x26, 0x2a, 0x36, 0x48, 0x58, 0x5f, +0x5a, 0x4f, 0x46, 0x41, 0x32, 0x1b, 0x17, 0x37, 0x69, 0x7b, +0x59, 0x2f, 0x24, 0x30, 0x2a, 0x8, 0xfffffff6, 0x7, 0x24, 0x31, +0x2f, 0x33, 0x32, 0x1e, 0x4, 0x7, 0x23, 0x33, 0x21, 0xe, +0x1e, 0x4a, 0x61, 0x4b, 0x21, 0xe, 0x22, 0x49, 0x5e, 0x4d, +0x25, 0xb, 0x18, 0x32, 0x33, 0x15, 0x5, 0x29, 0x64, 0x76, +0x4d, 0x16, 0x9, 0x26, 0x37, 0x23, 0xb, 0x15, 0x3c, 0x52, +0x40, 0x23, 0x1d, 0x2d, 0x36, 0x2d, 0x24, 0x29, 0x32, 0x2c, +0x21, 0x2b, 0x50, 0x7b, 0x8d, 0x73, 0x47, 0x22, 0xf, 0x7, +0xffffffff, 0x0, 0x13, 0x2d, 0x36, 0x2b, 0x23, 0x32, 0x4e, 0x5c, +0x55, 0x4f, 0x55, 0x5c, 0x50, 0x34, 0x20, 0x22, 0x32, 0x38, +0x2f, 0x25, 0x2a, 0x35, 0x32, 0x23, 0x1f, 0x36, 0x57, 0x60, +0x4c, 0x31, 0x2d, 0x40, 0x57, 0x67, 0x67, 0x4c, 0x21, 0x4, +0x8, 0x20, 0x30, 0x2c, 0x33, 0x4e, 0x61, 0x56, 0x39, 0x26, +0x26, 0x2b, 0x2e, 0x38, 0x4a, 0x57, 0x58, 0x5c, 0x5f, 0x50, +0x31, 0x1d, 0x31, 0x58, 0x5d, 0x37, 0x16, 0x23, 0x55, 0x71, +0x56, 0x28, 0x18, 0x30, 0x51, 0x60, 0x5c, 0x52, 0x4f, 0x54, +0x5e, 0x62, 0x57, 0x45, 0x3a, 0x35, 0x26, 0x17, 0x23, 0x47, +0x5d, 0x48, 0x27, 0x30, 0x61, 0x79, 0x5a, 0x31, 0x2d, 0x45, +0x4f, 0x41, 0x3e, 0x48, 0x48, 0x3a, 0x3d, 0x53, 0x55, 0x2f, +0xd, 0x1f, 0x55, 0x69, 0x47, 0x1e, 0x1c, 0x32, 0x3c, 0x31, +0x28, 0x2d, 0x34, 0x32, 0x2e, 0x2e, 0x2f, 0x2d, 0x2f, 0x32, +0x2f, 0x26, 0x23, 0x30, 0x3d, 0x2c, 0x3, 0xffffffef, 0xa, 0x34, +0x39, 0x18, 0xa, 0x28, 0x42, 0x28, 0xfffffffb, 0xfffffffe, 0x37, 0x61, +0x53, 0x32, 0x35, 0x4b, 0x4c, 0x36, 0x36, 0x4e, 0x56, 0x33, +0xe, 0x1b, 0x4e, 0x69, 0x51, 0x22, 
0xd, 0x24, 0x4b, 0x5e, +0x4d, 0x2a, 0x12, 0x16, 0x29, 0x35, 0x33, 0x2a, 0x25, 0x26, +0x2f, 0x38, 0x31, 0x1d, 0x1d, 0x42, 0x68, 0x58, 0x1a, 0xffffffff, +0x2a, 0x63, 0x5f, 0x27, 0xa, 0x22, 0x34, 0x1e, 0xb, 0x27, +0x58, 0x5a, 0x2e, 0x10, 0x1b, 0x28, 0x23, 0x31, 0x60, 0x7c, +0x56, 0x1b, 0x1d, 0x5d, 0x81, 0x5c, 0x29, 0x2c, 0x4e, 0x51, +0x35, 0x33, 0x4d, 0x53, 0x32, 0x24, 0x50, 0x86, 0x85, 0x5a, +0x46, 0x5d, 0x6b, 0x5b, 0x4f, 0x63, 0x71, 0x54, 0x2a, 0x2c, +0x50, 0x56, 0x30, 0x1e, 0x4d, 0x8d, 0x90, 0x5b, 0x3a, 0x55, +0x80, 0x89, 0x78, 0x7b, 0x8a, 0x7d, 0x53, 0x3e, 0x5b, 0x83, +0x7f, 0x59, 0x4a, 0x5b, 0x5e, 0x2e, 0xfffffff4, 0xfffffff3, 0x2d, 0x5f, +0x61, 0x50, 0x54, 0x5e, 0x50, 0x30, 0x26, 0x34, 0x32, 0x18, +0x9, 0x27, 0x5b, 0x74, 0x6d, 0x5e, 0x52, 0x40, 0x2d, 0x34, +0x54, 0x5c, 0x31, 0x0, 0xa, 0x56, 0x9c, 0x96, 0x59, 0x2e, +0x38, 0x57, 0x5e, 0x4b, 0x46, 0x5e, 0x78, 0x7c, 0x77, 0x80, +0x8d, 0x7d, 0x4f, 0x2b, 0x2b, 0x33, 0x1e, 0x0, 0x6, 0x28, +0x37, 0x1d, 0x9, 0x24, 0x53, 0x5d, 0x3d, 0x1f, 0x21, 0x29, +0x18, 0xfffffffc, 0xfffffff5, 0x6, 0x12, 0x9, 0xfffffffd, 0x1, 0xf, 0xc, +0xfffffffa, 0xfffffff2, 0x9, 0x32, 0x4d, 0x56, 0x5c, 0x62, 0x53, 0x27, +0x0, 0xfffffffc, 0xc, 0x8, 0xfffffff0, 0xfffffff9, 0x36, 0x6a, 0x55, 0x1c, +0x1b, 0x60, 0x8e, 0x61, 0x15, 0x14, 0x5e, 0x8c, 0x61, 0x1d, +0x1a, 0x52, 0x6b, 0x3d, 0xfffffffb, 0xffffffe8, 0x1, 0x15, 0xc, 0xfffffffe, +0x0, 0xd, 0x11, 0x9, 0x1, 0x1, 0x7, 0xc, 0xb, 0x7, +0x6, 0xd, 0x16, 0x17, 0x10, 0xc, 0x13, 0x1c, 0x13, 0x0, +0x0, 0x26, 0x5f, 0x7b, 0x68, 0x48, 0x48, 0x68, 0x7d, 0x60, +0x2d, 0x19, 0x37, 0x5c, 0x5a, 0x31, 0xf, 0x13, 0x31, 0x4c, +0x5e, 0x71, 0x83, 0x7f, 0x58, 0x20, 0xfffffffd, 0xfffffff8, 0x9, 0x18, +0x19, 0x16, 0x19, 0x27, 0x37, 0x3d, 0x42, 0x4f, 0x62, 0x65, +0x4f, 0x33, 0x2c, 0x36, 0x3d, 0x38, 0x34, 0x40, 0x54, 0x5d, +0x5b, 0x58, 0x59, 0x53, 0x41, 0x32, 0x33, 0x3f, 0x4a, 0x4b, +0x43, 0x33, 0x25, 0x28, 0x3d, 0x4f, 0x44, 0x23, 0xe, 0x16, +0x29, 0x2c, 0x28, 0x39, 0x63, 0x84, 0x7d, 0x5f, 0x56, 0x69, +0x73, 0x56, 0x24, 0xc, 0x1e, 0x40, 0x56, 0x60, 0x69, 0x6e, +0x63, 0x4e, 0x42, 0x44, 0x47, 0x3b, 0x2a, 0x22, 0x2d, 0x45, +0x5d, 0x70, 0x77, 0x6d, 0x5b, 0x4a, 0x3f, 0x37, 0x32, 0x39, +0x43, 0x3d, 0x20, 0x5, 0x10, 0x3f, 0x64, 0x5a, 0x34, 0x21, +0x2e, 0x3e, 0x33, 0x22, 0x30, 0x59, 0x75, 0x71, 0x60, 0x61, +0x67, 0x55, 0x2d, 0x12, 0x1d, 0x43, 0x65, 0x71, 0x6c, 0x5f, +0x53, 0x47, 0x39, 0x29, 0x1c, 0x1e, 0x35, 0x56, 0x6f, 0x74, +0x6f, 0x6a, 0x66, 0x5c, 0x4b, 0x3a, 0x33, 0x36, 0x38, 0x2c, +0 }; + +// To match size of input +#define OUTSIZE 720 + + + +/*-------------------------------------------------- + *--- Prototypes + *--------------------------------------------------*/ + + +void fir_filter_int(long* in,long* out,long in_len, + long* coef,long coef_len, + long scale); + + +/*-------------------------------------------------- + *--- Main Function + *--------------------------------------------------*/ +int main() +{ + long output[OUTSIZE]; + + fir_filter_int(in_data,output,700,fir_int,35,285); +// fir_filter_int(in_data,output,10,fir_int,35,285);Changed JG/Ebbe + + /* Verify results */ + /*for(i=0;i<700;i++)*/ + /* if (output[i]!=out_data[i])*/ + /*{ printf("Error: index %d, data %x != %x\n",*/ + /* i, output[i], out_data[i]); */ + /* break; */ + /* }*/ + return 0; +} + +/************************************************************************** +fir_filter_int - Filters int data array based on passed int coefficients. + +The length of the input and output arrays are equal +and are allocated by the calller. +The length of the coefficient array is passed. 
+An integer scale factor (passed) is used to divide the accumulation result. + +void fir_filter_int(int *in,int *out,int in_len, + int *coef,int coef_len,int scale) + + in integer pointer to input array + out integer pointer to output array + in_len length of input and output arrays + coef integer pointer to coefficient array + coef_len length of coeffient array + scale scale factor to divide after accumulation + +No return value. + +*************************************************************************/ + +void fir_filter_int(long* in,long* out,long in_len, + long* coef,long coef_len, + long scale) +{ + long i,j,coef_len2,acc_length; + long acc; + long *in_ptr,*data_ptr,*coef_start,*coef_ptr,*in_end; + + /* set up for coefficients */ + coef_start = coef; + coef_len2 = (coef_len + 1) >> 1; + + /* set up input data pointers */ + in_end = in + in_len - 1; + in_ptr = in + coef_len2 - 1; + + /* initial value of accumulation length for startup */ + acc_length = coef_len2; + + for(i = 0 ; i < in_len ; i++) { + + /* set up pointer for accumulation */ + data_ptr = in_ptr; + coef_ptr = coef_start; + + /* do accumulation and write result with scale factor */ + + acc = (long)(*coef_ptr++) * (*data_ptr--); + for(j = 1 ; j < acc_length ; j++) + acc += (long)(*coef_ptr++) * (*data_ptr--); + *out++ = (int)(acc/scale); + + /* check for end case */ + + if(in_ptr == in_end) { + acc_length--; /* one shorter each time */ + coef_start++; /* next coefficient each time */ + } + + /* if not at end, then check for startup, add to input pointer */ + + else { + if(acc_length < coef_len) acc_length++; + in_ptr++; + } + } +} + diff --git a/test/src/insertsort.c b/test/src/insertsort.c new file mode 100755 index 0000000..55cdb61 --- /dev/null +++ b/test/src/insertsort.c @@ -0,0 +1,91 @@ +/* $Id: insertsort.c,v 1.2 2005/04/04 11:34:58 csg Exp $ */ + +/*************************************************************************/ +/* */ +/* SNU-RT Benchmark Suite for Worst Case Timing Analysis */ +/* ===================================================== */ +/* Collected and Modified by S.-S. Lim */ +/* sslim@archi.snu.ac.kr */ +/* Real-Time Research Group */ +/* Seoul National University */ +/* */ +/* */ +/* < Features > - restrictions for our experimental environment */ +/* */ +/* 1. Completely structured. */ +/* - There are no unconditional jumps. */ +/* - There are no exit from loop bodies. */ +/* (There are no 'break' or 'return' in loop bodies) */ +/* 2. No 'switch' statements. */ +/* 3. No 'do..while' statements. */ +/* 4. Expressions are restricted. */ +/* - There are no multiple expressions joined by 'or', */ +/* 'and' operations. */ +/* 5. No library calls. */ +/* - All the functions needed are implemented in the */ +/* source file. */ +/* */ +/* */ +/*************************************************************************/ +/* */ +/* FILE: insertsort.c */ +/* SOURCE : Public Domain Code */ +/* */ +/* DESCRIPTION : */ +/* */ +/* Insertion sort for 10 integer numbers. */ +/* The integer array a[] is initialized in main function. 
*/ +/* */ +/* REMARK : */ +/* */ +/* EXECUTION TIME : */ +/* */ +/* */ +/*************************************************************************/ + + + +#ifdef DEBUG +int cnt1, cnt2; +#endif + +unsigned int a[11]; + +int main() +{ + int i,j, temp; + + a[0] = 0; /* assume all data is positive */ + a[1] = 11; a[2]=10;a[3]=9; a[4]=8; a[5]=7; a[6]=6; a[7]=5; + a[8] =4; a[9]=3; a[10]=2; + i = 2; + while(i <= 10){ +#ifdef DEBUG + cnt1++; +#endif + j = i; +#ifdef DEBUG + cnt2=0; +#endif + while (a[j] < a[j-1]) + { +#ifdef DEBUG + cnt2++; +#endif + temp = a[j]; + a[j] = a[j-1]; + a[j-1] = temp; + j--; + } +#ifdef DEBUG + printf("Inner Loop Counts: %d\n", cnt2); +#endif + i++; + } +#ifdef DEBUG + printf("Outer Loop : %d , Inner Loop : %d\n", cnt1, cnt2); +#endif + return 1; +} + + diff --git a/test/src/janne_complex.c b/test/src/janne_complex.c new file mode 100755 index 0000000..e03f119 --- /dev/null +++ b/test/src/janne_complex.c @@ -0,0 +1,63 @@ + +/*---------------------------------------------------------------------- + * WCET Benchmark created by Andreas Ermedahl, Uppsala university, + * May 2000. + * + * The purpose of this benchmark is to have two loop where the inner + * loops max number of iterations depends on the outer loops current + * iterations. The results corresponds to something Jannes flow-analysis + * should produce. + * + * The example appeard for the first time in: + * + * @InProceedings{Ermedahl:Annotations, + * author = "A. Ermedahl and J. Gustafsson", + * title = "Deriving Annotations for Tight Calculation of Execution Time", + * year = 1997, + * month = aug, + * booktitle = EUROPAR97, + * publisher = "Springer Verlag", + * pages = "1298-1307" + * } + * + * The result of Jannes tool is something like: + * outer loop: 1 2 3 4 5 6 7 8 9 10 11 + * inner loop max: 5 9 8 7 4 2 1 1 1 1 1 + * + *----------------------------------------------------------------------*/ + +int complex(int a, int b) +{ + while(a < 30) + { + while(b < a) + { + if(b > 5) + b = b * 3; + else + b = b + 2; + if(b >= 10 && b <= 12) + a = a + 10; + else + a = a + 1; + } + a = a + 2; + b = b - 10; + } + return 1; +} + +int main() +{ + /* a = [1..30] b = [1..30] */ + int a = 1, b = 1, answer = 0; + /* if(answer) + {a = 1; b = 1;} + else + {a = 30; b = 30;} */ + answer = complex(a, b); + return answer; +} + + + diff --git a/test/src/jfdctint.c b/test/src/jfdctint.c new file mode 100755 index 0000000..7e08606 --- /dev/null +++ b/test/src/jfdctint.c @@ -0,0 +1,374 @@ +/* $Id: jfdctint.c,v 1.2 2005/04/04 11:34:58 csg Exp $ */ + +/*************************************************************************/ +/* */ +/* SNU-RT Benchmark Suite for Worst Case Timing Analysis */ +/* ===================================================== */ +/* Collected and Modified by S.-S. Lim */ +/* sslim@archi.snu.ac.kr */ +/* Real-Time Research Group */ +/* Seoul National University */ +/* */ +/* */ +/* < Features > - restrictions for our experimental environment */ +/* */ +/* 1. Completely structured. */ +/* - There are no unconditional jumps. */ +/* - There are no exit from loop bodies. */ +/* (There are no 'break' or 'return' in loop bodies) */ +/* 2. No 'switch' statements. */ +/* 3. No 'do..while' statements. */ +/* 4. Expressions are restricted. */ +/* - There are no multiple expressions joined by 'or', */ +/* 'and' operations. */ +/* 5. No library calls. */ +/* - All the functions needed are implemented in the */ +/* source file. 
*/ +/* */ +/* */ +/*************************************************************************/ +/* */ +/* FILE: jfdctint.c */ +/* SOURCE : Thomas G. Lane, Public domain JPEG source code. */ +/* Modified by Steven Li at Princeton University. */ +/* */ +/* DESCRIPTION : */ +/* */ +/* JPEG slow-but-accurate integer implementation of the forward */ +/* DCT (Discrete Cosine Transform). */ +/* */ +/* REMARK : */ +/* */ +/* EXECUTION TIME : */ +/* */ +/* */ +/*************************************************************************/ + + +/********************************************************************** + Functions to be timed +***********************************************************************/ + +/* This definitions are added by Steven Li so as to bypass the header + files. + */ +#define DCT_ISLOW_SUPPORTED +#define DCTSIZE 8 +#define BITS_IN_JSAMPLE 8 +#define MULTIPLY16C16(var,const) ((var) * (const)) +#define DCTELEM int +#define INT32 int +#define GLOBAL +#define RIGHT_SHIFT(x,shft) ((x) >> (shft)) +#define ONE ((INT32) 1) +#define DESCALE(x,n) RIGHT_SHIFT((x) + (ONE << ((n)-1)), n) +#define SHIFT_TEMPS + + +/* + * jfdctint.c + * + * Copyright (C) 1991-1994, Thomas G. Lane. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains a slow-but-accurate integer implementation of the + * forward DCT (Discrete Cosine Transform). + * + * A 2-D DCT can be done by 1-D DCT on each row followed by 1-D DCT + * on each column. Direct algorithms are also available, but they are + * much more complex and seem not to be any faster when reduced to code. + * + * This implementation is based on an algorithm described in + * C. Loeffler, A. Ligtenberg and G. Moschytz, "Practical Fast 1-D DCT + * Algorithms with 11 Multiplications", Proc. Int'l. Conf. on Acoustics, + * Speech, and Signal Processing 1989 (ICASSP '89), pp. 988-991. + * The primary algorithm described there uses 11 multiplies and 29 adds. + * We use their alternate method with 12 multiplies and 32 adds. + * The advantage of this method is that no data path contains more than one + * multiplication; this allows a very simple and accurate implementation in + * scaled fixed-point arithmetic, with a minimal number of shifts. + */ + +#define JPEG_INTERNALS + +#ifdef DCT_ISLOW_SUPPORTED + + +/* + * This module is specialized to the case DCTSIZE = 8. + */ + +#if DCTSIZE != 8 + Sorry, this code only copes with 8x8 DCTs. /* deliberate syntax err */ +#endif + + +/* + * The poop on this scaling stuff is as follows: + * + * Each 1-D DCT step produces outputs which are a factor of sqrt(N) + * larger than the true DCT outputs. The final outputs are therefore + * a factor of N larger than desired; since N=8 this can be cured by + * a simple right shift at the end of the algorithm. The advantage of + * this arrangement is that we save two multiplications per 1-D DCT, + * because the y0 and y4 outputs need not be divided by sqrt(N). + * In the IJG code, this factor of 8 is removed by the quantization step + * (in jcdctmgr.c), NOT in this module. + * + * We have to do addition and subtraction of the integer inputs, which + * is no problem, and multiplication by fractional constants, which is + * a problem to do in integer arithmetic. We multiply all the constants + * by CONST_SCALE and convert them to integer constants (thus retaining + * CONST_BITS bits of precision in the constants). 
After doing a + * multiplication we have to divide the product by CONST_SCALE, with proper + * rounding, to produce the correct output. This division can be done + * cheaply as a right shift of CONST_BITS bits. We postpone shifting + * as long as possible so that partial sums can be added together with + * full fractional precision. + * + * The outputs of the first pass are scaled up by PASS1_BITS bits so that + * they are represented to better-than-integral precision. These outputs + * require BITS_IN_JSAMPLE + PASS1_BITS + 3 bits; this fits in a 16-bit word + * with the recommended scaling. (For 12-bit sample data, the intermediate + * array is INT32 anyway.) + * + * To avoid overflow of the 32-bit intermediate results in pass 2, we must + * have BITS_IN_JSAMPLE + CONST_BITS + PASS1_BITS <= 26. Error analysis + * shows that the values given below are the most effective. + */ + +#if BITS_IN_JSAMPLE == 8 +#define CONST_BITS 13 +#define PASS1_BITS 2 +#else +#define CONST_BITS 13 +#define PASS1_BITS 1 /* lose a little precision to avoid overflow */ +#endif + +/* Some C compilers fail to reduce "FIX(constant)" at compile time, thus + * causing a lot of useless floating-point operations at run time. + * To get around this we use the following pre-calculated constants. + * If you change CONST_BITS you may want to add appropriate values. + * (With a reasonable C compiler, you can just rely on the FIX() macro...) + */ + +#if CONST_BITS == 13 +#define FIX_0_298631336 ((INT32) 2446) /* FIX(0.298631336) */ +#define FIX_0_390180644 ((INT32) 3196) /* FIX(0.390180644) */ +#define FIX_0_541196100 ((INT32) 4433) /* FIX(0.541196100) */ +#define FIX_0_765366865 ((INT32) 6270) /* FIX(0.765366865) */ +#define FIX_0_899976223 ((INT32) 7373) /* FIX(0.899976223) */ +#define FIX_1_175875602 ((INT32) 9633) /* FIX(1.175875602) */ +#define FIX_1_501321110 ((INT32) 12299) /* FIX(1.501321110) */ +#define FIX_1_847759065 ((INT32) 15137) /* FIX(1.847759065) */ +#define FIX_1_961570560 ((INT32) 16069) /* FIX(1.961570560) */ +#define FIX_2_053119869 ((INT32) 16819) /* FIX(2.053119869) */ +#define FIX_2_562915447 ((INT32) 20995) /* FIX(2.562915447) */ +#define FIX_3_072711026 ((INT32) 25172) /* FIX(3.072711026) */ +#else +#define FIX_0_298631336 FIX(0.298631336) +#define FIX_0_390180644 FIX(0.390180644) +#define FIX_0_541196100 FIX(0.541196100) +#define FIX_0_765366865 FIX(0.765366865) +#define FIX_0_899976223 FIX(0.899976223) +#define FIX_1_175875602 FIX(1.175875602) +#define FIX_1_501321110 FIX(1.501321110) +#define FIX_1_847759065 FIX(1.847759065) +#define FIX_1_961570560 FIX(1.961570560) +#define FIX_2_053119869 FIX(2.053119869) +#define FIX_2_562915447 FIX(2.562915447) +#define FIX_3_072711026 FIX(3.072711026) +#endif + + +/* Multiply an INT32 variable by an INT32 constant to yield an INT32 result. + * For 8-bit samples with the recommended scaling, all the variable + * and constant values involved are no more than 16 bits wide, so a + * 16x16->32 bit multiply can be used instead of a full 32x32 multiply. + * For 12-bit samples, a full 32-bit multiplication will be needed. + */ + +#if BITS_IN_JSAMPLE == 8 +#define MULTIPLY(var,const) MULTIPLY16C16(var,const) +#else +#define MULTIPLY(var,const) ((var) * (const)) +#endif + +DCTELEM data[64]; + +/* + * Perform the forward DCT on one block of samples. 
+ */ + +GLOBAL void +jpeg_fdct_islow () +{ + INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + INT32 tmp10, tmp11, tmp12, tmp13; + INT32 z1, z2, z3, z4, z5; + DCTELEM *dataptr; + int ctr; + SHIFT_TEMPS + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT; */ + /* furthermore, we scale the results by 2**PASS1_BITS. */ + + dataptr = data; + for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { + tmp0 = dataptr[0] + dataptr[7]; + tmp7 = dataptr[0] - dataptr[7]; + tmp1 = dataptr[1] + dataptr[6]; + tmp6 = dataptr[1] - dataptr[6]; + tmp2 = dataptr[2] + dataptr[5]; + tmp5 = dataptr[2] - dataptr[5]; + tmp3 = dataptr[3] + dataptr[4]; + tmp4 = dataptr[3] - dataptr[4]; + + /* Even part per LL&M figure 1 --- note that published figure is faulty; + * rotator "sqrt(2)*c1" should be "sqrt(2)*c6". + */ + + tmp10 = tmp0 + tmp3; + tmp13 = tmp0 - tmp3; + tmp11 = tmp1 + tmp2; + tmp12 = tmp1 - tmp2; + + dataptr[0] = (DCTELEM) ((tmp10 + tmp11) << PASS1_BITS); + dataptr[4] = (DCTELEM) ((tmp10 - tmp11) << PASS1_BITS); + + z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); + dataptr[2] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865), + CONST_BITS-PASS1_BITS); + dataptr[6] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065), + CONST_BITS-PASS1_BITS); + + /* Odd part per figure 8 --- note paper omits factor of sqrt(2). + * cK represents cos(K*pi/16). + * i0..i3 in the paper are tmp4..tmp7 here. + */ + + z1 = tmp4 + tmp7; + z2 = tmp5 + tmp6; + z3 = tmp4 + tmp6; + z4 = tmp5 + tmp7; + z5 = MULTIPLY(z3 + z4, FIX_1_175875602); /* sqrt(2) * c3 */ + + tmp4 = MULTIPLY(tmp4, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */ + tmp5 = MULTIPLY(tmp5, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */ + tmp6 = MULTIPLY(tmp6, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */ + tmp7 = MULTIPLY(tmp7, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */ + z1 = MULTIPLY(z1, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */ + z2 = MULTIPLY(z2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */ + z3 = MULTIPLY(z3, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */ + z4 = MULTIPLY(z4, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */ + + z3 += z5; + z4 += z5; + + dataptr[7] = (DCTELEM) DESCALE(tmp4 + z1 + z3, CONST_BITS-PASS1_BITS); + dataptr[5] = (DCTELEM) DESCALE(tmp5 + z2 + z4, CONST_BITS-PASS1_BITS); + dataptr[3] = (DCTELEM) DESCALE(tmp6 + z2 + z3, CONST_BITS-PASS1_BITS); + dataptr[1] = (DCTELEM) DESCALE(tmp7 + z1 + z4, CONST_BITS-PASS1_BITS); + + dataptr += DCTSIZE; /* advance pointer to next row */ + } + + /* Pass 2: process columns. + * We remove the PASS1_BITS scaling, but leave the results scaled up + * by an overall factor of 8. + */ + + dataptr = data; + for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { + tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*7]; + tmp7 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*7]; + tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*6]; + tmp6 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*6]; + tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*5]; + tmp5 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*5]; + tmp3 = dataptr[DCTSIZE*3] + dataptr[DCTSIZE*4]; + tmp4 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*4]; + + /* Even part per LL&M figure 1 --- note that published figure is faulty; + * rotator "sqrt(2)*c1" should be "sqrt(2)*c6". 
+ */ + + tmp10 = tmp0 + tmp3; + tmp13 = tmp0 - tmp3; + tmp11 = tmp1 + tmp2; + tmp12 = tmp1 - tmp2; + + dataptr[DCTSIZE*0] = (DCTELEM) DESCALE(tmp10 + tmp11, PASS1_BITS); + dataptr[DCTSIZE*4] = (DCTELEM) DESCALE(tmp10 - tmp11, PASS1_BITS); + + z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); + dataptr[DCTSIZE*2] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865), + CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*6] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065), + CONST_BITS+PASS1_BITS); + + /* Odd part per figure 8 --- note paper omits factor of sqrt(2). + * cK represents cos(K*pi/16). + * i0..i3 in the paper are tmp4..tmp7 here. + */ + + z1 = tmp4 + tmp7; + z2 = tmp5 + tmp6; + z3 = tmp4 + tmp6; + z4 = tmp5 + tmp7; + z5 = MULTIPLY(z3 + z4, FIX_1_175875602); /* sqrt(2) * c3 */ + + tmp4 = MULTIPLY(tmp4, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */ + tmp5 = MULTIPLY(tmp5, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */ + tmp6 = MULTIPLY(tmp6, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */ + tmp7 = MULTIPLY(tmp7, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */ + z1 = MULTIPLY(z1, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */ + z2 = MULTIPLY(z2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */ + z3 = MULTIPLY(z3, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */ + z4 = MULTIPLY(z4, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */ + + z3 += z5; + z4 += z5; + + dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(tmp4 + z1 + z3, + CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp5 + z2 + z4, + CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp6 + z2 + z3, + CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp7 + z1 + z4, + CONST_BITS+PASS1_BITS); + + dataptr++; /* advance pointer to next column */ + } +} + +#endif /* DCT_ISLOW_SUPPORTED */ + + + + + +/* Main function + Time to function execution time using logic analyzer, + which measures the OFF time of a LED on board. + + The switching latency, including the function call/return time, + is measured to be equal to 1.1us (22 clock cycles). +*/ +void main(void) +{ + int i, seed; + + /* Worst case settings */ + /* Set array to random values */ + seed = 1; + for (i = 0; i < 64; i++) { + seed = ((seed * 133) + 81) % 65535; + data[i] = seed; + } + + jpeg_fdct_islow(); +} diff --git a/test/src/lcdnum.c b/test/src/lcdnum.c new file mode 100755 index 0000000..d62c09d --- /dev/null +++ b/test/src/lcdnum.c @@ -0,0 +1,71 @@ +/* MDH WCET BENCHMARK SUITE. */ + +/* 2012/09/28, Jan Gustafsson + * Changes: + * - The volatile variable n controls a loop, which is not correct. The loop will not terminate. Fixed. + */ + +/*********************************************************************** + * FILE: synthetic.c + * + * PURPOSE: demonstrate effect of flow facts for straight loops + * + * IDEA: reading from an in port mapped to a ten-item buffer, + * send first five characters to an LCD as numbers + * + ***********************************************************************/ + + +unsigned char num_to_lcd(unsigned char a) +{ + /* -0- 1 01 + * 1 2 2 4 02 04 + * -3- i.e. 8 i.e. 
08 + * 4 5 16 32 10 20 + * -6- 64 40 + * + */ + switch(a) + { + case 0x00: return 0; + case 0x01: return 0x24; + case 0x02: return 1+4+8+16+64; + case 0x03: return 1+4+8+32+64; + case 0x04: return 2+4+8+32; + case 0x05: return 1+4+8+16+64; + case 0x06: return 1+2+8+16+32+64; + case 0x07: return 1+4+32; + case 0x08: return 0x7F; /* light all */ + case 0x09: return 0x0F + 32 + 64; + case 0x0A: return 0x0F + 16 + 32; + case 0x0B: return 2+8+16+32+64; + case 0x0C: return 1+2+16+64; + case 0x0D: return 4+8+16+32+64; + case 0x0E: return 1+2+8+16+64; + case 0x0F: return 1+2+8+16; + } + return 0; +} + +volatile unsigned char IN; +volatile unsigned char OUT; + +int main(void) +{ + int i; + unsigned char a; + /*volatile*/ int n; /* JG */ + + n = 10; + for(i=0; i< n; i++) + { + a = IN; /* scan port */ + if(i<5) + { + a = a &0x0F; + OUT = num_to_lcd(a); + } + } + return 0; +} + diff --git a/test/src/lms.c b/test/src/lms.c new file mode 100755 index 0000000..ef78de2 --- /dev/null +++ b/test/src/lms.c @@ -0,0 +1,271 @@ +/* MDH WCET BENCHMARK SUITE. */ + +/* 2012/09/28, Jan Gustafsson + * Changes: + * - This program redefines the C standard functions log, fabs, sqrt, and sin. Therefore, these function has been renamed with prefix lms_. + * - Warning: explicitly assigning a variable of type 'float' to itself + * x =x; + * removed. + * - Unused variables removed. + */ + + +/*************************************************************************/ +/* */ +/* SNU-RT Benchmark Suite for Worst Case Timing Analysis */ +/* ===================================================== */ +/* Collected and Modified by S.-S. Lim */ +/* sslim@archi.snu.ac.kr */ +/* Real-Time Research Group */ +/* Seoul National University */ +/* */ +/* */ +/* < Features > - restrictions for our experimental environment */ +/* */ +/* 1. Completely structured. */ +/* - There are no unconditional jumps. */ +/* - There are no exit from loop bodies. */ +/* (There are no 'break' or 'return' in loop bodies) */ +/* 2. No 'switch' statements. */ +/* 3. No 'do..while' statements. */ +/* 4. Expressions are restricted. */ +/* - There are no multiple expressions joined by 'or', */ +/* 'and' operations. */ +/* 5. No library calls. */ +/* - All the functions needed are implemented in the */ +/* source file. */ +/* */ +/* */ +/*************************************************************************/ +/* */ +/* FILE: lms.c */ +/* SOURCE : C Algorithms for Real-Time DSP by P. M. Embree */ +/* */ +/* DESCRIPTION : */ +/* */ +/* An LMS adaptive signal enhancement. The input signal is a sine */ +/* wave with added white noise. */ +/* The detailed description is in the program source code. 
*/ +/* */ +/* REMARK : */ +/* */ +/* EXECUTION TIME : */ +/* */ +/* */ +/*************************************************************************/ + + + +#define RAND_MAX 32768 +#define PI 3.14159265358979323846 + + +/* function prototypes for fft and filter functions */ + +static float gaussian(void); + +#define N 201 +#define L 20 /* filter order, (length L+1) */ + +/* set convergence parameter */ +float mu = 0.01; + + +int lms_rand() +{ + static unsigned long next = 1; + + next = next * 1103515245 + 12345; + return (unsigned int)(next/65536) % 32768; +} + +static float lms_log(r) +float r; +{ + return 4.5; +} + +static float lms_fabs(float n) +{ + float f; + + if (n >= 0) f = n; + else f = -n; + return f; +} + +static float lms_sqrt(val) +float val; +{ + float x = val/10; + + float dx; + + double diff; + double min_tol = 0.00001; + + int i, flag; + + flag = 0; + if (val == 0 ) x = 0; + else { + for (i=1;i<20;i++) + { + if (!flag) { + dx = (val - (x*x)) / (2.0 * x); + x = x + dx; + diff = val - (x*x); + if (lms_fabs(diff) <= min_tol) flag = 1; + } + else { } /* JG */ +/* x =x; */ + } + } + return (x); +} + + +static float lms_sin(rad) +float rad; +{ + float app; + + float diff; + int inc = 1; + + while (rad > 2*PI) + rad -= 2*PI; + while (rad < -2*PI) + rad += 2*PI; + app = diff = rad; + diff = (diff * (-(rad*rad))) / + ((2.0 * inc) * (2.0 * inc + 1.0)); + app = app + diff; + inc++; + while(lms_fabs(diff) >= 0.00001) { + diff = (diff * (-(rad*rad))) / + ((2.0 * inc) * (2.0 * inc + 1.0)); + app = app + diff; + inc++; + } + + return(app); +} + +static float gaussian() +{ + static int ready = 0; /* flag to indicated stored value */ + static float gstore; /* place to store other value */ + static float rconst1 = (float)(2.0/RAND_MAX); + static float rconst2 = (float)(RAND_MAX/2.0); + float v1,v2,r,fac; + float gaus; + +/* make two numbers if none stored */ + if(ready == 0) { + v1 = (float)lms_rand() - rconst2; + v2 = (float)lms_rand() - rconst2; + v1 *= rconst1; + v2 *= rconst1; + r = v1*v1 + v2*v2; + while (r > 1.0f) { + v1 = (float)lms_rand() - rconst2; + v2 = (float)lms_rand() - rconst2; + v1 *= rconst1; + v2 *= rconst1; + r = v1*v1 + v2*v2; + } /* make radius less than 1 */ + +/* remap v1 and v2 to two Gaussian numbers */ + fac = lms_sqrt(-2.0f*lms_log(r)/r); + gstore = v1*fac; /* store one */ + gaus = v2*fac; /* return one */ + ready = 1; /* set ready flag */ + } + + else { + ready = 0; /* reset ready flag for next pair */ + gaus = gstore; /* return the stored one */ + } + + return(gaus); +} + + +int main() +{ + float lms(float,float,float *,int,float,float); + static float d[N],b[21]; + float signal_amp,noise_amp,arg,x/*,y*/; /* JG */ + int k; + +/* create signal plus noise */ + signal_amp = lms_sqrt(2.0); + noise_amp = 0.2*lms_sqrt(12.0); + arg = 2.0*PI/20.0; + for(k = 0 ; k < N ; k++) { + d[k] = signal_amp*lms_sin(arg*k) + noise_amp*gaussian(); + } + +/* scale based on L */ + mu = 2.0*mu/(L+1); + + x = 0.0; + for(k = 0 ; k < N ; k++) { + lms(x,d[k],b,L,mu,0.01); +/* delay x one sample */ + x = d[k]; + } + return 0; +} + +/* + function lms(x,d,b,l,mu,alpha) + +Implements NLMS Algorithm b(k+1)=b(k)+2*mu*e*x(k)/((l+1)*sig) + +x = input data +d = desired signal +b[0:l] = Adaptive coefficients of lth order fir filter +l = order of filter (> 1) +mu = Convergence parameter (0.0 to 1.0) +alpha = Forgetting factor sig(k)=alpha*(x(k)**2)+(1-alpha)*sig(k-1) + (>= 0.0 and < 1.0) + +returns the filter output +*/ + +float lms(float x,float d,float *b,int l, + float mu,float alpha) +{ + int ll; + 
float e,mu_e,/*lms_const,*/y; /* JG */ + static float px[51]; /* max L = 50 */ + static float sigma = 2.0; /* start at 2 and update internally */ + + px[0]=x; + +/* calculate filter output */ + y=b[0]*px[0]; +#ifdef DEBUG + printf("l=%d\n",l); +#endif + for(ll = 1 ; ll <= l ; ll++) + y=y+b[ll]*px[ll]; + +/* error signal */ + e=d-y; + +/* update sigma */ + sigma=alpha*(px[0]*px[0])+(1-alpha)*sigma; + mu_e=mu*e/sigma; + +/* update coefficients */ + for(ll = 0 ; ll <= l ; ll++) + b[ll]=b[ll]+mu_e*px[ll]; +/* update history */ + for(ll = l ; ll >= 1 ; ll--) + px[ll]=px[ll-1]; + + return(y); +} diff --git a/test/src/ludcmp.c b/test/src/ludcmp.c new file mode 100755 index 0000000..4415f19 --- /dev/null +++ b/test/src/ludcmp.c @@ -0,0 +1,149 @@ +/* MDH WCET BENCHMARK SUITE. File version $Id: ludcmp.c,v 1.2 2006/01/27 13:15:28 jgn Exp $ */ + +/*************************************************************************/ +/* */ +/* SNU-RT Benchmark Suite for Worst Case Timing Analysis */ +/* ===================================================== */ +/* Collected and Modified by S.-S. Lim */ +/* sslim@archi.snu.ac.kr */ +/* Real-Time Research Group */ +/* Seoul National University */ +/* */ +/* */ +/* < Features > - restrictions for our experimental environment */ +/* */ +/* 1. Completely structured. */ +/* - There are no unconditional jumps. */ +/* - There are no exit from loop bodies. */ +/* (There are no 'break' or 'return' in loop bodies) */ +/* 2. No 'switch' statements. */ +/* 3. No 'do..while' statements. */ +/* 4. Expressions are restricted. */ +/* - There are no multiple expressions joined by 'or', */ +/* 'and' operations. */ +/* 5. No library calls. */ +/* - All the functions needed are implemented in the */ +/* source file. */ +/* */ +/* */ +/*************************************************************************/ +/* */ +/* FILE: ludcmp.c */ +/* SOURCE : Turbo C Programming for Engineering */ +/* */ +/* DESCRIPTION : */ +/* */ +/* Simultaneous linear equations by LU decomposition. */ +/* The arrays a[][] and b[] are input and the array x[] is output */ +/* row vector. */ +/* The variable n is the number of equations. */ +/* The input arrays are initialized in function main. */ +/* */ +/* */ +/* REMARK : */ +/* */ +/* EXECUTION TIME : */ +/* */ +/* */ +/*************************************************************************/ + + +/* Changes: + * JG 2005/12/12: Indented program. Removed unused variable nmax. + */ + +/* +** Benchmark Suite for Real-Time Applications, by Sung-Soo Lim +** +** III-4. 
ludcmp.c : Simultaneous Linear Equations by LU Decomposition +** (from the book C Programming for EEs by Hyun Soon Ahn) +*/ + + + +double a[50][50], b[50], x[50]; + +int ludcmp( /* int nmax, */ int n, double eps); + + +static double +fabs(double n) +{ + double f; + + if (n >= 0) + f = n; + else + f = -n; + return f; +} + +int +main(void) +{ + + int i, j/*, nmax = 50*/, n = 5, chkerr; + double eps, w; + + eps = 1.0e-6; + + for (i = 0; i <= n; i++) { + w = 0.0; + for (j = 0; j <= n; j++) { + a[i][j] = (i + 1) + (j + 1); + if (i == j) + a[i][j] *= 10.0; + w += a[i][j]; + } + b[i] = w; + } + + chkerr = ludcmp( /* nmax, */ n, eps); + + return 0; + +} + +int +ludcmp( /* int nmax, */ int n, double eps) +{ + + int i, j, k; + double w, y[100]; + + if (n > 99 || eps <= 0.0) + return (999); + for (i = 0; i < n; i++) { + if (fabs(a[i][i]) <= eps) + return (1); + for (j = i + 1; j <= n; j++) { + w = a[j][i]; + if (i != 0) + for (k = 0; k < i; k++) + w -= a[j][k] * a[k][i]; + a[j][i] = w / a[i][i]; + } + for (j = i + 1; j <= n; j++) { + w = a[i + 1][j]; + for (k = 0; k <= i; k++) + w -= a[i + 1][k] * a[k][j]; + a[i + 1][j] = w; + } + } + y[0] = b[0]; + for (i = 1; i <= n; i++) { + w = b[i]; + for (j = 0; j < i; j++) + w -= a[i][j] * y[j]; + y[i] = w; + } + x[n] = y[n] / a[n][n]; + for (i = n - 1; i >= 0; i--) { + w = y[i]; + for (j = i + 1; j <= n; j++) + w -= a[i][j] * x[j]; + x[i] = w / a[i][i]; + } + return (0); + +} diff --git a/test/src/matmult.c b/test/src/matmult.c new file mode 100755 index 0000000..f11ac34 --- /dev/null +++ b/test/src/matmult.c @@ -0,0 +1,162 @@ +/* $Id: matmult.c,v 1.2 2005/04/04 11:34:58 csg Exp $ */ + +/* matmult.c */ +/* was mm.c! */ + + +/*----------------------------------------------------------------------* + * To make this program compile under our assumed embedded environment, + * we had to make several changes: + * - Declare all functions in ANSI style, not K&R. + * this includes adding return types in all cases! + * - Declare function prototypes + * - Disable all output + * - Disable all UNIX-style includes + * + * This is a program that was developed from mm.c to matmult.c by + * Thomas Lundqvist at Chalmers. + *----------------------------------------------------------------------*/ +#define UPPSALAWCET 1 + + +/* ***UPPSALA WCET***: + disable stupid UNIX includes */ +#ifndef UPPSALAWCET +#include +#include +#endif + +/* + * MATRIX MULTIPLICATION BENCHMARK PROGRAM: + * This program multiplies 2 square matrices resulting in a 3rd + * matrix. It tests a compiler's speed in handling multidimensional + * arrays and simple arithmetic. + */ + +#define UPPERLIMIT 20 + +typedef int matrix [UPPERLIMIT][UPPERLIMIT]; + +int Seed; +matrix ArrayA, ArrayB, ResultArray; + +#ifdef UPPSALAWCET +/* Our picky compiler wants prototypes! */ +void Multiply(matrix A, matrix B, matrix Res); +void InitSeed(void); +void Test(matrix A, matrix B, matrix Res); +void Initialize(matrix Array); +int RandomInteger(void); +#endif + +void main() +{ + InitSeed(); +/* ***UPPSALA WCET***: + no printing please! */ +#ifndef UPPSALAWCET + printf("\n *** MATRIX MULTIPLICATION BENCHMARK TEST ***\n\n"); + printf("RESULTS OF THE TEST:\n"); +#endif + Test(ArrayA, ArrayB, ResultArray); +} + + +void InitSeed(void) +/* + * Initializes the seed used in the random number generator. + */ +{ + /* ***UPPSALA WCET***: + changed Thomas Ls code to something simpler. + Seed = KNOWN_VALUE - 1; */ + Seed = 0; +} + + +void Test(matrix A, matrix B, matrix Res) +/* + * Runs a multiplication test on an array. 
Calculates and prints the + * time it takes to multiply the matrices. + */ +{ +#ifndef UPPSALAWCET + long StartTime, StopTime; + float TotalTime; +#endif + + Initialize(A); + Initialize(B); + + /* ***UPPSALA WCET***: don't print or time */ +#ifndef UPPSALAWCET + StartTime = ttime (); +#endif + + Multiply(A, B, Res); + + /* ***UPPSALA WCET***: don't print or time */ +#ifndef UPPSALAWCET + StopTime = ttime(); + TotalTime = (StopTime - StartTime) / 1000.0; + printf(" - Size of array is %d\n", UPPERLIMIT); + printf(" - Total multiplication time is %3.3f seconds\n\n", TotalTime); +#endif +} + + +void Initialize(matrix Array) +/* + * Intializes the given array with random integers. + */ +{ + int OuterIndex, InnerIndex; + + for (OuterIndex = 0; OuterIndex < UPPERLIMIT; OuterIndex++) + for (InnerIndex = 0; InnerIndex < UPPERLIMIT; InnerIndex++) + Array[OuterIndex][InnerIndex] = RandomInteger(); +} + + +int RandomInteger(void) +/* + * Generates random integers between 0 and 8095 + */ +{ + Seed = ((Seed * 133) + 81) % 8095; + return (Seed); +} + + +#ifndef UPPSALAWCET +int ttime() +/* + * This function returns in milliseconds the amount of compiler time + * used prior to it being called. + */ +{ + struct tms buffer; + int utime; + + /* times(&buffer); times not implemented */ + utime = (buffer.tms_utime / 60.0) * 1000.0; + return (utime); +} +#endif + +void Multiply(matrix A, matrix B, matrix Res) +/* + * Multiplies arrays A and B and stores the result in ResultArray. + */ +{ + register int Outer, Inner, Index; + + for (Outer = 0; Outer < UPPERLIMIT; Outer++) + for (Inner = 0; Inner < UPPERLIMIT; Inner++) + { + Res [Outer][Inner] = 0; + for (Index = 0; Index < UPPERLIMIT; Index++) + Res[Outer][Inner] += + A[Outer][Index] * B[Index][Inner]; + } +} diff --git a/test/src/minver.c b/test/src/minver.c new file mode 100755 index 0000000..20a6a61 --- /dev/null +++ b/test/src/minver.c @@ -0,0 +1,213 @@ +/* MDH WCET BENCHMARK SUITE */ + + +/* 2012/09/28, Jan Gustafsson + * Changes: + * - Missing braces around initialization of subobject added + * - This program redefines the standard C function fabs. Therefore, the + * function has been renamed to minver_fabs. + */ + +/*************************************************************************/ +/* */ +/* SNU-RT Benchmark Suite for Worst Case Timing Analysis */ +/* ===================================================== */ +/* Collected and Modified by S.-S. Lim */ +/* sslim@archi.snu.ac.kr */ +/* Real-Time Research Group */ +/* Seoul National University */ +/* */ +/* */ +/* < Features > - restrictions for our experimental environment */ +/* */ +/* 1. Completely structured. */ +/* - There are no unconditional jumps. */ +/* - There are no exit from loop bodies. */ +/* (There are no 'break' or 'return' in loop bodies) */ +/* 2. No 'switch' statements. */ +/* 3. No 'do..while' statements. */ +/* 4. Expressions are restricted. */ +/* - There are no multiple expressions joined by 'or', */ +/* 'and' operations. */ +/* 5. No library calls. */ +/* - All the functions needed are implemented in the */ +/* source file. */ +/* */ +/* */ +/*************************************************************************/ +/* */ +/* FILE: minver.c */ +/* SOURCE : Turbo C Programming for Engineering by Hyun Soo Ahn */ +/* */ +/* DESCRIPTION : */ +/* */ +/* Matrix inversion for 3x3 floating point matrix. 
*/ +/* */ +/* REMARK : */ +/* */ +/* EXECUTION TIME : */ +/* */ +/* */ +/*************************************************************************/ + + +int minver(int row, int col, double eps); +int mmul(int row_a, int col_a, int row_b, int col_b); + +static double a[3][3] = { + {3.0, -6.0, 7.0}, + {9.0, 0.0, -5.0}, + {5.0, -8.0, 6.0}, +}; +double b[3][3], c[3][3], aa[3][3], a_i[3][3], e[3][3], det; + + +double minver_fabs(double n) +{ + double f; + + if (n >= 0) f = n; + else f = -n; + return f; +} + +int main() +{ + int i, j; + double eps; + + eps = 1.0e-6; + + for(i = 0; i < 3; i++) + for(j = 0; j < 3; j++) + aa[i][j] = a[i][j]; + + minver(3, 3, eps); + for(i = 0; i < 3; i++) + for(j = 0; j < 3; j++) + a_i[i][j] = a[i][j]; + + mmul(3, 3, 3, 3); + return 0; +} + + +int mmul(int row_a, int col_a, int row_b, int col_b) +{ + int i, j, k, row_c, col_c; + double w; + + row_c = row_a; + col_c = col_b; + + if(row_c < 1 || row_b < 1 || col_c < 1 || col_a != row_b) return(999); + for(i = 0; i < row_c; i++) + { + for(j = 0; j < col_c; j++) + { + w = 0.0; + for(k = 0; k < row_b; k++) + w += a[i][k] * b[k][j]; + c[i][j] = w; + } + } + return(0); + +} + + +int minver(int row, int col, double eps) +{ + + int work[500], i, j, k, r, iw, s, t, u, v; + double w, wmax, pivot, api, w1; + + if(row < 2 || row > 500 || eps <= 0.0) return(999); + w1 = 1.0; + for(i = 0; i < row; i++) + work[i] = i; + for(k = 0; k < row; k++) + { + wmax = 0.0; + for(i = k; i < row; i++) + { + w = minver_fabs(a[i][k]); + if(w > wmax) + { + wmax = w; + r = i; + } + } + pivot = a[r][k]; + api = minver_fabs(pivot); + if(api <= eps) + { + det = w1; + return(1); + } + w1 *= pivot; + u = k * col; + v = r * col; + if(r != k) + { + w1 = -w; + iw = work[k]; + work[k] = work[r]; + work[r] = iw; + for(j = 0; j < row; j++) + { + s = u + j; + t = v + j; + w = a[k][j]; + a[k][j] = a[r][j]; + a[r][j] = w; + } + } + for(i = 0; i < row; i++) + a[k][i] /= pivot; + for(i = 0; i < row; i++) + { + if(i != k) + { + v = i * col; + s = v + k; + w = a[i][k]; + if(w != 0.0) + { + for(j = 0; j < row; j++) + if(j != k) a[i][j] -= w * a[k][j]; + a[i][k] = -w / pivot; + } + } + } + a[k][k] = 1.0 / pivot; + } + for(i = 0; i < row; i++) + { + while(1) + { + k = work[i]; + if(k == i) break; + iw = work[k]; + work[k] = work[i]; + work[i] = iw; + for(j = 0; j < row; j++) + { + u = j * col; + s = u + i; + t = u + k; + w = a[k][i]; + a[k][i] = a[k][k]; + a[k][k] = w; + } + } + } + det = w1; + return(0); + +} + + + + + diff --git a/test/src/ndes.c b/test/src/ndes.c new file mode 100755 index 0000000..054817f --- /dev/null +++ b/test/src/ndes.c @@ -0,0 +1,238 @@ +/* MDH WCET BENCHMARK SUITE. 
*/ + +/* 2012/10/03, Jan Gustafsson + * Changes: + * - init of "is" fixed (added a lot of brackets) + * - warning: array subscript is of type 'char': fixed in three places + */ + + +/* #include -- no include files in Uppsala tests, plz */ + +/* All output disabled for wcsim */ + +/* A read from this address will result in an known value of 1 */ +/* #define KNOWN_VALUE (int)(*((char *)0x80200001)) Changed JG/Ebbe */ +#define KNOWN_VALUE 1 + +/* A read from this address will result in an unknown value */ +#define UNKNOWN_VALUE (int)(*((char *)0x80200003)) + + +#define WORSTCASE 1 + +typedef struct IMMENSE { unsigned long l, r; } immense; +typedef struct GREAT { unsigned long l, c, r; } great; + +unsigned long bit[33]; + +static immense icd; +static char ipc1[57]={0,57,49,41,33,25,17,9,1,58,50, + 42,34,26,18,10,2,59,51,43,35,27,19,11,3,60, + 52,44,36,63,55,47,39,31,23,15,7,62,54,46,38, + 30,22,14,6,61,53,45,37,29,21,13,5,28,20,12,4}; +static char ipc2[49]={0,14,17,11,24,1,5,3,28,15,6,21, + 10,23,19,12,4,26,8,16,7,27,20,13,2,41,52,31, + 37,47,55,30,40,51,45,33,48,44,49,39,56,34, + 53,46,42,50,36,29,32}; + +void des(immense inp, immense key, int * newkey, int isw, immense * out); +unsigned long getbit(immense source, int bitno, int nbits); +void ks(/*immense key, */int n, great * kn); +void cyfun(unsigned long ir, great k, unsigned long * iout); + +void des(immense inp, immense key, int * newkey, int isw, immense * out) { + + static char ip[65] = + {0,58,50,42,34,26,18,10,2,60,52,44,36, + 28,20,12,4,62,54,46,38,30,22,14,6,64,56,48,40, + 32,24,16,8,57,49,41,33,25,17,9,1,59,51,43,35, + 27,19,11,3,61,53,45,37,29,21,13,5,63,55,47,39, + 31,23,15,7}; + static char ipm[65]= + {0,40,8,48,16,56,24,64,32,39,7,47,15, + 55,23,63,31,38,6,46,14,54,22,62,30,37,5,45,13, + 53,21,61,29,36,4,44,12,52,20,60,28,35,3,43,11, + 51,19,59,27,34,2,42,10,50,18,58,26,33,1,41,9, + 49,17,57,25}; + static great kns[17]; +#ifdef WORSTCASE + static int initflag=1; +#else + static int initflag=0; +#endif + int ii,i,j,k; + unsigned long ic,shifter,getbit(); + immense itmp; + great pg; + + if (initflag) { + initflag=0; + bit[1]=shifter=1L; + for(j=2;j<=32;j++) bit[j] = (shifter <<= 1); + } + if (*newkey) { + *newkey=0; + icd.r=icd.l=0L; + for (j=28,k=56;j>=1;j--,k--) { + icd.r = (icd.r <<= 1) | getbit(key,ipc1[j],32); + icd.l = (icd.l <<= 1) | getbit(key,ipc1[k],32); + } + + for(i=1;i<=16;i++) {pg = kns[i]; ks(/* key,*/ i, &pg); kns[i] = pg;} + } + itmp.r=itmp.l=0L; + for (j=32,k=64;j>=1;j--,k--) { + itmp.r = (itmp.r <<= 1) | getbit(inp,ip[j],32); + itmp.l = (itmp.l <<= 1) | getbit(inp,ip[k],32); + } + for (i=1;i<=16;i++) { + ii = (isw == 1 ? 17-i : i); + cyfun(itmp.l, kns[ii], &ic); + ic ^= itmp.r; + itmp.r=itmp.l; + itmp.l=ic; + } + ic=itmp.r; + itmp.r=itmp.l; + itmp.l=ic; + (*out).r=(*out).l=0L; + for (j=32,k=64; j >= 1; j--, k--) { + (*out).r = ((*out).r <<= 1) | getbit(itmp,ipm[j],32); + (*out).l = ((*out).l <<= 1) | getbit(itmp,ipm[k],32); + } +} +unsigned long getbit(immense source, int bitno, int nbits) { + if (bitno <= nbits) + return bit[bitno] & source.r ? 1L : 0L; + else + return bit[bitno-nbits] & source.l ? 
1L : 0L; +} + +void ks(/*immense key, */int n, great * kn) { + int i,j,k,l; + + if (n == 1 || n == 2 || n == 9 || n == 16) { + icd.r = (icd.r | ((icd.r & 1L) << 28)) >> 1; + icd.l = (icd.l | ((icd.l & 1L) << 28)) >> 1; + } + else + for (i=1;i<=2;i++) { + icd.r = (icd.r | ((icd.r & 1L) << 28)) >> 1; + icd.l = (icd.l | ((icd.l & 1L) << 28)) >> 1; + } + (*kn).r=(*kn).c=(*kn).l=0; + for (j=16,k=32,l=48; j>=1; j--,k--,l--) { + (*kn).r=((*kn).r <<= 1) | (unsigned short) + getbit(icd,ipc2[j],28); + (*kn).c=((*kn).c <<= 1) | (unsigned short) + getbit(icd,ipc2[k],28); + (*kn).l=((*kn).l <<= 1) | (unsigned short) + getbit(icd,ipc2[l],28); + } +} + +void cyfun(unsigned long ir, great k, unsigned long * iout) { + static int iet[49]={0,32,1,2,3,4,5,4,5,6,7,8,9,8,9, + 10,11,12,13,12,13,14,15,16,17,16,17,18,19, + 20,21,20,21,22,23,24,25,24,25,26,27,28,29, + 28,29,30,31,32,1}; + static int ipp[33]={0,16,7,20,21,29,12,28,17,1,15, + 23,26,5,18,31,10,2,8,24,14,32,27,3,9,19,13, + 30,6,22,11,4,25}; + static char is[16][4][9]={ + {{0,14,15,10,7,2,12,4,13},{0,0,3,13,13,14,10,13,1}, + {0,4,0,13,10,4,9,1,7},{0,15,13,1,3,11,4,6, 2}}, + {{0,4,1,0,13,12,1,11,2},{0,15,13,7,8,11,15,0,15}, + {0,1,14,6,6,2,14,4,11},{0,12,8,10,15,8,3,11,1}}, + {{0,13,8,9,14,4,10,2,8},{0,7,4,0,11,2,4,11,13}, + {0,14,7,4,9,1,15,11,4},{0,8,10,13,0,12,2,13,14}}, + {{0,1,14,14,3,1,15,14,4},{0,4,7,9,5,12,2,7,8}, + {0,8,11,9,0,11,5,13,1},{0,2,1,0,6,7,12,8,7}}, + {{0,2,6,6,0,7,9,15,6},{0,14,15,3,6,4,7,4,10}, + {0,13,10,8,12,10,2,12,9},{0,4,3,6,10,1,9,1,4}}, + {{0,15,11,3,6,10,2,0,15},{0,2,2,4,15,7,12,9,3}, + {0,6,4,15,11,13,8,3,12},{0,9,15,9,1,14,5,4,10}}, + {{0,11,3,15,9,11,6,8,11},{0,13,8,6,0,13,9,1,7}, + {0,2,13,3,7,7,12,7,14},{0,1,4,8,13,2,15,10,8}}, + {{0,8,4,5,10,6,8,13,1},{0,1,14,10,3,1,5,10,4}, + {0,11,1,0,13,8,3,14,2},{0,7,2,7,8,13,10,7,13}}, + {{0,3,9,1,1,8,0,3,10},{0,10,12,2,4,5,6,14,12}, + {0,15,5,11,15,15,7,10,0},{0,5,11,4,9,6,11,9,15}}, + {{0,10,7,13,2,5,13,12,9},{0,6,0,8,7,0,1,3,5}, + {0,12,8,1,1,9,0,15,6},{0,11,6,15,4,15,14,5,12}}, + {{0,6,2,12,8,3,3,9,3},{0,12,1,5,2,15,13,5,6}, + {0,9,12,2,3,12,4,6,10},{0,3,7,14,5,0,1,0,9}}, + {{0,12,13,7,5,15,4,7,14},{0,11,10,14,12,10,14,12,11}, + {0,7,6,12,14,5,10,8,13},{0,14,12,3,11,9,7,15,0}}, + {{0,5,12,11,11,13,14,5,5},{0,9,6,12,1,3,0,2,0}, + {0,3,9,5,5,6,1,0,15},{0,10,0,11,12,10,6,14,3}}, + {{0,9,0,4,12,0,7,10,0},{0,5,9,11,10,9,11,15,14}, + {0,10,3,10,2,3,13,5,3},{0,0,5,5,7,4,0,2,5}}, + {{0,0,5,2,4,14,5,6,12},{0,3,11,15,14,8,3,8,9}, + {0,5,2,14,8,0,11,9,5},{0,6,14,2,2,5,8,3,6}}, + {{0,7,10,8,15,9,11,1,7},{0,8,5,1,9,6,8,6,2}, + {0,0,15,7,4,14,6,2,8},{0,13,9,12,14,3,13,12,11}}}; + static char ibin[16]={0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15}; + great ie; + unsigned long itmp,ietmp1,ietmp2; + char iec[9]; + int jj,irow,icol,iss,j,l,m; + unsigned long *p; + + p = bit; + ie.r=ie.c=ie.l=0; + for (j=16,l=32,m=48;j>=1;j--,l--,m--) { + ie.r = (ie.r <<=1) | (p[iet[j]] & ir ? 1 : 0); + ie.c = (ie.c <<=1) | (p[iet[l]] & ir ? 1 : 0); + ie.l = (ie.l <<=1) | (p[iet[m]] & ir ? 
1 : 0); + } + ie.r ^= k.r; + ie.c ^= k.c; + ie.l ^= k.l; + ietmp1=((unsigned long) ie.c << 16)+(unsigned long) ie.r; + ietmp2=((unsigned long) ie.l << 8)+((unsigned long) ie.c >> 8); + for (j=1,m=5;j<=4;j++,m++) { + iec[j]=ietmp1 & 0x3fL; + iec[m]=ietmp2 & 0x3fL; + ietmp1 >>= 6; + ietmp2 >>= 6; + } + itmp=0L; + for (jj=8;jj>=1;jj--) { + j =iec[jj]; + irow=((j & 0x1) << 1)+((j & 0x20) >> 5); + icol=((j & 0x2) << 2)+(j & 0x4) + +((j & 0x8) >> 2)+((j & 0x10) >> 4); + iss=is[icol][irow][jj]; + itmp = (itmp <<= 4) | ibin[iss]; + } + *iout=0L; + p = bit; + for (j=32;j>=1;j--) + *iout = (*iout <<= 1) | (p[ipp[j]] & itmp ? 1 : 0); +} +#ifdef WORSTCASE + int value = 1; +#else + int value = 0; +#endif + +int main(void) +{ + immense inp, key, out; + int newkey, isw; + + inp.l = KNOWN_VALUE * 35; + inp.r = KNOWN_VALUE * 26; + key.l = KNOWN_VALUE * 2; + key.r = KNOWN_VALUE * 16; + + newkey = value; + isw = value; + + des(inp, key, &newkey, isw, &out); +/* printf("%u %u\n", out.l, out.r);*/ + return 0; +} + diff --git a/test/src/ns.c b/test/src/ns.c new file mode 100755 index 0000000..99f8b85 --- /dev/null +++ b/test/src/ns.c @@ -0,0 +1,531 @@ +/* $Id: ns.c,v 1.2 2005/04/04 11:34:58 csg Exp $ */ + +/* Test of deeply nested loops and non-local exits */ + + +/*-------------------------------------------------- * + * LOG: + * $Log: ns.c,v $ + * Revision 1.2 2005/04/04 11:34:58 csg + * again + * + * Revision 1.1 2005/03/29 11:28:43 jgn + * New file. + * + * Revision 1.8 2001/05/07 10:05:37 ijae + * no message + * + * Revision 1.7 2001/04/25 12:48:15 ijae + * Corrected trace names. + * + * Revision 1.6 2001/04/25 12:17:47 ijae + * no message + * + * Revision 1.5 2001/04/25 12:11:31 ijae + * Compilable for V850 + * + * Revision 1.4 2001/04/25 12:09:55 ijae + * Now in target mode. + * + * Revision 1.3 2001/04/25 12:06:36 ijae + * Now 4D array. Compiles & runs on PC + * + * Revision 1.2 2001/04/25 11:59:38 ijae + * A bit more comments. + * + *-------------------------------------------------- */ + + +/* -------------------------------------------------- * + * Define TEST to check the # iterations in inner loop, + * and that the right value is found and returned + * -------------------------------------------------- */ + +//#define TEST + +/* -------------------------------------------------- + * Array of keys and values, 4-dimensional just + * for the fun of it. 
+ * -------------------------------------------------- */ +int keys[5][5][5][5] = +{ + // [0] + { + // [0][0] + { + {0,0,0,0,0}, + {0,0,0,0,0}, + {0,0,0,0,0}, + {0,0,0,0,0}, + {0,0,0,0,0} + }, + // [0][1] + { + {0,0,0,0,0}, + {0,0,0,0,0}, + {0,0,0,0,0}, + {0,0,0,0,0}, + {0,0,0,0,0} + }, + // [0][2] + { + {0,0,0,0,0}, + {0,0,0,0,0}, + {0,0,0,0,0}, + {0,0,0,0,0}, + {0,0,0,0,0} + }, + // [0][3] + { + {0,0,0,0,0}, + {0,0,0,0,0}, + {0,0,0,0,0}, + {0,0,0,0,0}, + {0,0,0,0,0} + }, + // [0][4] + { + {0,0,0,0,0}, + {0,0,0,0,0}, + {0,0,0,0,0}, + {0,0,0,0,0}, + {0,0,0,0,0} + } + }, + // [1] + { + // [1][0] + { + {1,1,1,1,1}, + {1,1,1,1,1}, + {1,1,1,1,1}, + {1,1,1,1,1}, + {1,1,1,1,1} + }, + // [1][1] + { + {1,1,1,1,1}, + {1,1,1,1,1}, + {1,1,1,1,1}, + {1,1,1,1,1}, + {1,1,1,1,1} + }, + // [1][2] + { + {1,1,1,1,1}, + {1,1,1,1,1}, + {1,1,1,1,1}, + {1,1,1,1,1}, + {1,1,1,1,1} + }, + // [1][3] + { + {1,1,1,1,1}, + {1,1,1,1,1}, + {1,1,1,1,1}, + {1,1,1,1,1}, + {1,1,1,1,1} + }, + // [1][4] + { + {1,1,1,1,1}, + {1,1,1,1,1}, + {1,1,1,1,1}, + {1,1,1,1,1}, + {1,1,1,1,1} + } + }, + // [2] + { + // [2][0] + { + {2,2,2,2,2}, + {2,2,2,2,2}, + {2,2,2,2,2}, + {2,2,2,2,2}, + {2,2,2,2,2} + }, + // [2][1] + { + {2,2,2,2,2}, + {2,2,2,2,2}, + {2,2,2,2,2}, + {2,2,2,2,2}, + {2,2,2,2,2} + }, + // [2][2] + { + {2,2,2,2,2}, + {2,2,2,2,2}, + {2,2,2,2,2}, + {2,2,2,2,2}, + {2,2,2,2,2} + }, + // [2][3] + { + {2,2,2,2,2}, + {2,2,2,2,2}, + {2,2,2,2,2}, + {2,2,2,2,2}, + {2,2,2,2,2} + }, + // [2][4] + { + {2,2,2,2,2}, + {2,2,2,2,2}, + {2,2,2,2,2}, + {2,2,2,2,2}, + {2,2,2,2,2} + } + }, + // [3] + { + // [3][0] + { + {3,3,3,3,3}, + {3,3,3,3,3}, + {3,3,3,3,3}, + {3,3,3,3,3}, + {3,3,3,3,3} + }, + // [3][1] + { + {3,3,3,3,3}, + {3,3,3,3,3}, + {3,3,3,3,3}, + {3,3,3,3,3}, + {3,3,3,3,3} + }, + // [3][2] + { + {3,3,3,3,3}, + {3,3,3,3,3}, + {3,3,3,3,3}, + {3,3,3,3,3}, + {3,3,3,3,3} + }, + // [3][3] + { + {3,3,3,3,3}, + {3,3,3,3,3}, + {3,3,3,3,3}, + {3,3,3,3,3}, + {3,3,3,3,3} + }, + // [3][4] + { + {3,3,3,3,3}, + {3,3,3,3,3}, + {3,3,3,3,3}, + {3,3,3,3,3}, + {3,3,3,3,3} + } + }, + // [4] + { + // [4][0] + { + {4,4,4,4,4}, + {4,4,4,4,4}, + {4,4,4,4,4}, + {4,4,4,4,4}, + {4,4,4,4,4} + }, + // [4][1] + { + {4,4,4,4,4}, + {4,4,4,4,4}, + {4,4,4,4,4}, + {4,4,4,4,4}, + {4,4,4,4,4} + }, + // [4][2] + { + {4,4,4,4,4}, + {4,4,4,4,4}, + {4,4,4,4,4}, + {4,4,4,4,4}, + {4,4,4,4,4} + }, + // [4][3] + { + {4,4,4,4,4}, + {4,4,4,4,4}, + {4,4,4,4,4}, + {4,4,4,4,4}, + {4,4,4,4,4} + }, + // [4][4] + { + {4,4,4,4,4}, + {4,4,4,4,4}, + {4,4,4,4,4}, + {4,4,4,4,4}, + {4,4,4,4, +#ifdef FIND_TARGET + 400 +#else + 401 /* not searched for */ +#endif + } + } + } +}; + + + +int answer[5][5][5][5] = +{ + // [0] + { + // [0][0] + { + {123,123,123,123,123}, + {123,123,123,123,123}, + {123,123,123,123,123}, + {123,123,123,123,123}, + {123,123,123,123,123} + }, + // [0][1] + { + {123,123,123,123,123}, + {123,123,123,123,123}, + {123,123,123,123,123}, + {123,123,123,123,123}, + {123,123,123,123,123} + }, + // [0][2] + { + {123,123,123,123,123}, + {123,123,123,123,123}, + {123,123,123,123,123}, + {123,123,123,123,123}, + {123,123,123,123,123} + }, + // [0][3] + { + {123,123,123,123,123}, + {123,123,123,123,123}, + {123,123,123,123,123}, + {123,123,123,123,123}, + {123,123,123,123,123} + }, + // [0][4] + { + {123,123,123,123,123}, + {123,123,123,123,123}, + {123,123,123,123,123}, + {123,123,123,123,123}, + {123,123,123,123,123} + } + }, + // [1] + { + // [1][0] + { + {234,234,234,234,234}, + {234,234,234,234,234}, + {234,234,234,234,234}, + {234,234,234,234,234}, + {234,234,234,234,234} + }, 
+ // [1][1] + { + {234,234,234,234,234}, + {234,234,234,234,234}, + {234,234,234,234,234}, + {234,234,234,234,234}, + {234,234,234,234,234} + }, + // [1][2] + { + {234,234,234,234,234}, + {234,234,234,234,234}, + {234,234,234,234,234}, + {234,234,234,234,234}, + {234,234,234,234,234} + }, + // [1][3] + { + {234,234,234,234,234}, + {234,234,234,234,234}, + {234,234,234,234,234}, + {234,234,234,234,234}, + {234,234,234,234,234} + }, + // [1][4] + { + {234,234,234,234,234}, + {234,234,234,234,234}, + {234,234,234,234,234}, + {234,234,234,234,234}, + {234,234,234,234,234} + } + }, + // [2] + { + // [2][0] + { + {345,345,345,345}, + {345,345,345,345}, + {345,345,345,345}, + {345,345,345,345}, + {345,345,345,345} + }, + // [2][1] + { + {345,345,345,345}, + {345,345,345,345}, + {345,345,345,345}, + {345,345,345,345}, + {345,345,345,345} + }, + // [2][2] + { + {345,345,345,345}, + {345,345,345,345}, + {345,345,345,345}, + {345,345,345,345}, + {345,345,345,345} + }, + // [2][3] + { + {345,345,345,345}, + {345,345,345,345}, + {345,345,345,345}, + {345,345,345,345}, + {345,345,345,345} + }, + // [2][4] + { + {345,345,345,345}, + {345,345,345,345}, + {345,345,345,345}, + {345,345,345,345}, + {345,345,345,345} + } + }, + // [3] + { + // [3][0] + { + {456,456,456,456,456}, + {456,456,456,456,456}, + {456,456,456,456,456}, + {456,456,456,456,456}, + {456,456,456,456,456} + }, + // [3][1] + { + {456,456,456,456,456}, + {456,456,456,456,456}, + {456,456,456,456,456}, + {456,456,456,456,456}, + {456,456,456,456,456} + }, + // [3][2] + { + {456,456,456,456,456}, + {456,456,456,456,456}, + {456,456,456,456,456}, + {456,456,456,456,456}, + {456,456,456,456,456} + }, + // [3][3] + { + {456,456,456,456,456}, + {456,456,456,456,456}, + {456,456,456,456,456}, + {456,456,456,456,456}, + {456,456,456,456,456} + }, + // [3][4] + { + {456,456,456,456,456}, + {456,456,456,456,456}, + {456,456,456,456,456}, + {456,456,456,456,456}, + {456,456,456,456,456} + } + }, + // [4] + { + // [4][0] + { + {567,567,567,567,567}, + {567,567,567,567,567}, + {567,567,567,567,567}, + {567,567,567,567,567}, + {567,567,567,567,567} + }, + // [4][1] + { + {567,567,567,567,567}, + {567,567,567,567,567}, + {567,567,567,567,567}, + {567,567,567,567,567}, + {567,567,567,567,567} + }, + // [4][2] + { + {567,567,567,567,567}, + {567,567,567,567,567}, + {567,567,567,567,567}, + {567,567,567,567,567}, + {567,567,567,567,567} + }, + // [4][3] + { + {567,567,567,567,567}, + {567,567,567,567,567}, + {567,567,567,567,567}, + {567,567,567,567,567}, + {567,567,567,567,567} + }, + // [4][4] + { + {567,567,567,567,567}, + {567,567,567,567,567}, + {567,567,567,567,567}, + {567,567,567,567,567}, + {567,567,567,567,1111} + } + } +}; + + +int foo(int x) +{ +#ifdef TEST + int c = 0; /* counter for innerloop */ +#endif + int i,j,k,l; + + for(i=0; i<5; i++) + for(j=0 ; j<5 ; j++) + for(k=0 ; k<5 ; k++) + for(l=0 ; l<5 ; l++) + { +#ifdef TEST + c++; +#endif + if( keys[i][j][k][l] == x ) + { +#ifdef TEST + printf(" %d\n",c); +#endif + return answer[i][j][k][l] + keys[i][j][k][l]; + } + } + return -1; +} + + +void main(void) +{ +#ifdef TEST + printf("result=%d\n",foo(400)); +#else + foo(400); +#endif +} diff --git a/test/src/nsichneu.c b/test/src/nsichneu.c new file mode 100755 index 0000000..3004438 --- /dev/null +++ b/test/src/nsichneu.c @@ -0,0 +1,4253 @@ +/* $Id: nsichneu.c,v 1.3 2005/04/15 09:18:42 jgn Exp $ */ + +/*************************************************************************** +* +* Copyright (c) 1998/1999, C-LAB, Paderborn +* +* File generated 
by Sea2CHaRy +* +* This file containes a CHaRy description generated from the +* file: NSicherNeu.sea +* The name of the Pr/T-Net was: NSicherNeu +* +* Modified and maintained by +* Friedhelm Stappert +* C-LAB, Paderborn, Germany +* fst@c-lab.de +* +* Modifications: +* o made the local variables global and volatile, +* so the compiler won't optimise everything away. +* + +***************************************************************************/ +// #define DO_TRACING + +#ifdef DO_TRACING // ON PC + +#include +#define TRACE(x) trace((x)) +#undef TEST /* finished testing! */ +void trace(char *s) +{ + printf("%s\n",s); +} + +#else // ON TARGET + +#define TRACE(x) +#undef TEST + +#endif + + + +volatile int P1_is_marked = 3; +volatile long P1_marking_member_0[3]; +volatile int P2_is_marked = 5; +volatile long P2_marking_member_0[5]; +volatile int P3_is_marked = 0; +volatile long P3_marking_member_0[6]; + + + +/**void NSicherNeu()**/ +int main() +{ + int dummy_i; +/* dummy_i = 17; Takes too much time */ + dummy_i = 2; + + while (dummy_i > 0) { + + dummy_i--; + /* Permutation for Place P1 : 0, 1, 2 */ + /* Transition T1 */ + if ( (P1_is_marked >= 3) && + (P3_is_marked + 3 <= 6) && + (P1_marking_member_0[1] == P1_marking_member_0[2]) ) { + + long x; + long y; + long z; + + x = P1_marking_member_0[0]; + y = P1_marking_member_0[1]; + + /* Transition condition */ + if (x < y) { + + /* demarking of input places */ + P1_is_marked -= 3; + + /* preaction */ + z = x - y; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = x; + P3_marking_member_0[P3_is_marked+1] = y; + P3_marking_member_0[P3_is_marked+2] = z; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + /* Permutation for Place P1 : 0, 2, 1 */ + /* Transition T1 */ + if ( (P1_is_marked >= 3) && + (P3_is_marked + 3 <= 6) && + (P1_marking_member_0[2] == P1_marking_member_0[1]) ) { + + long x; + long y; + long z; + + x = P1_marking_member_0[0]; + y = P1_marking_member_0[2]; + + /* Transition condition */ + if ((x < y)) { + + + /* demarking of input places */ + P1_is_marked -= 3; + + /* preaction */ + z = x - y; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = x; + P3_marking_member_0[P3_is_marked+1] = y; + P3_marking_member_0[P3_is_marked+2] = z; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + /* Permutation for Place P1 : 1, 0, 2 */ + /* Transition T1 */ + if ( (P1_is_marked >= 3) && + (P3_is_marked + 3 <= 6) && + (P1_marking_member_0[0] == P1_marking_member_0[2]) ) { + + long x; + long y; + long z; + + x = P1_marking_member_0[1]; + y = P1_marking_member_0[0]; + + /* Transition condition */ + if (x < y) { + + + /* demarking of input places */ + P1_is_marked -= 3; + + /* preaction */ + z = x - y; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = x; + P3_marking_member_0[P3_is_marked+1] = y; + P3_marking_member_0[P3_is_marked+2] = z; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + /* Permutation for Place P1 : 1, 2, 0 */ + /* Transition T1 */ + if ( (P1_is_marked >= 3) && + (P3_is_marked + 3 <= 6) && + (P1_marking_member_0[2] == P1_marking_member_0[0])) { + + long x; + long y; + long z; + + x = P1_marking_member_0[1]; + y = P1_marking_member_0[2]; + + /* Transition condition */ + if ((x < y)) { + + + /* demarking of input places */ + P1_is_marked -= 3; + + /* preaction */ + z = x - y; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = x; + P3_marking_member_0[P3_is_marked+1] = y; 
+ P3_marking_member_0[P3_is_marked+2] = z; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + /* Permutation for Place P1 : 2, 0, 1 */ + /* Transition T1 */ + if ( (P1_is_marked >= 3) && + (P3_is_marked + 3 <= 6) && + (P1_marking_member_0[0] == P1_marking_member_0[1]) ) { + long x; + long y; + long z; + + x = P1_marking_member_0[2]; + y = P1_marking_member_0[0]; + + /* Transition condition */ + if ((x < y)) { + + /* demarking of input places */ + P1_is_marked -= 3; + + /* preaction */ + z = x - y; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = x; + P3_marking_member_0[P3_is_marked+1] = y; + P3_marking_member_0[P3_is_marked+2] = z; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + /* Permutation for Place P1 : 2, 1, 0 */ + /* Transition T1 */ + if ( (P1_is_marked >= 3) && + (P3_is_marked + 3 <= 6) && + (P1_marking_member_0[1] == P1_marking_member_0[0]) ) { + long x; + long y; + long z; + + x = P1_marking_member_0[2]; + y = P1_marking_member_0[1]; + + /* Transition condition */ + if ((x < y)) { + + /* demarking of input places */ + P1_is_marked -= 3; + + /* preaction */ + z = x - y; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = x; + P3_marking_member_0[P3_is_marked+1] = y; + P3_marking_member_0[P3_is_marked+2] = z; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + /* Permutation for Place P2 : 0, 1, 2, 3 */ + /* Transition T2 */ + if ( (P2_is_marked >= 4) && + (((P3_is_marked + 3) <= 6)) && + ( ((P2_marking_member_0[1] == P2_marking_member_0[2])) && + ((P2_marking_member_0[1] == P2_marking_member_0[3])) ) ) { + long a; + long b; + long c; + + a = P2_marking_member_0[0]; + b = P2_marking_member_0[1]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + /* Permutation for Place P2 : 0, 1, 3, 2 */ + /* Transition T2 */ + if ( (P2_is_marked >= 4) && + (((P3_is_marked + 3) <= 6)) && + ( (P2_marking_member_0[1] == P2_marking_member_0[3]) && + (P2_marking_member_0[1] == P2_marking_member_0[2]) ) ) { + long a; + long b; + long c; + + a = P2_marking_member_0[0]; + b = P2_marking_member_0[1]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + /* Permutation for Place P2 : 0, 2, 1, 3 */ + /* Transition T2 */ + if ( (P2_is_marked >= 4) && + ((P3_is_marked + 3) <= 6) && + ( (P2_marking_member_0[2] == P2_marking_member_0[1]) && + (P2_marking_member_0[2] == P2_marking_member_0[3]) ) ) { + long a; + long b; + long c; + + a = P2_marking_member_0[0]; + b = P2_marking_member_0[2]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + 
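+      /* NOTE: the T2 blocks below repeat the pattern of the blocks above for
+         the remaining index permutations: guard on the token counts of P2 and
+         on free capacity in P3, compare the selected P2 entries, and, when
+         b > a, consume the used tokens and append a, b and c = a + b to P3. */
+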
/* Permutation for Place P2 : 0, 2, 3, 1 */ + /* Transition T2 */ + if ( (P2_is_marked >= 4) && + ((P3_is_marked + 3) <= 6) && + ( (P2_marking_member_0[2] == P2_marking_member_0[3]) && + (P2_marking_member_0[2] == P2_marking_member_0[1]) ) ) { + long a; + long b; + long c; + + a = P2_marking_member_0[0]; + b = P2_marking_member_0[2]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + /* Permutation for Place P2 : 0, 3, 1, 2 */ + /* Transition T2 */ + if ( (P2_is_marked >= 4) && + ((P3_is_marked + 3) <= 6) && + ( (P2_marking_member_0[3] == P2_marking_member_0[1]) && + (P2_marking_member_0[3] == P2_marking_member_0[2]) ) ) { + long a; + long b; + long c; + + a = P2_marking_member_0[0]; + b = P2_marking_member_0[3]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + /* Permutation for Place P2 : 0, 3, 2, 1 */ + /* Transition T2 */ + if ( (P2_is_marked >= 4) && + ((P3_is_marked + 3) <= 6) && + ( (P2_marking_member_0[3] == P2_marking_member_0[2]) && + (P2_marking_member_0[3] == P2_marking_member_0[1]) ) ) { + long a; + long b; + long c; + + a = P2_marking_member_0[0]; + b = P2_marking_member_0[3]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + /* Permutation for Place P2 : 1, 0, 2, 3 */ + /* Transition T2 */ + if ( (P2_is_marked >= 4) && + ((P3_is_marked + 3) <= 6) && + ( (P2_marking_member_0[0] == P2_marking_member_0[2]) && + (P2_marking_member_0[0] == P2_marking_member_0[3]) ) ) { + long a; + long b; + long c; + + a = P2_marking_member_0[1]; + b = P2_marking_member_0[0]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + /* Permutation for Place P2 : 1, 0, 3, 2 */ + /* Transition T2 */ + if ( (P2_is_marked >= 4) && + ((P3_is_marked + 3) <= 6) && + ( (P2_marking_member_0[0] == P2_marking_member_0[3]) && + (P2_marking_member_0[0] == P2_marking_member_0[2]) ) ) { + long a; + long b; + long c; + + a = P2_marking_member_0[1]; + b = P2_marking_member_0[0]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + 
} + + /* Permutation for Place P2 : 1, 2, 0, 3 */ + /* Transition T2 */ + if ( (P2_is_marked >= 4) && + ((P3_is_marked + 3) <= 6) && + ( (P2_marking_member_0[2] == P2_marking_member_0[0]) && + (P2_marking_member_0[2] == P2_marking_member_0[3]) ) ) { + long a; + long b; + long c; + + a = P2_marking_member_0[1]; + b = P2_marking_member_0[2]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + /* Permutation for Place P2 : 1, 2, 3, 0 */ + /* Transition T2 */ + if ( (P2_is_marked >= 4) && + ((P3_is_marked + 3) <= 6) && + ( (P2_marking_member_0[2] == P2_marking_member_0[3]) && + (P2_marking_member_0[2] == P2_marking_member_0[0]) ) ) { + long a; + long b; + long c; + + a = P2_marking_member_0[1]; + b = P2_marking_member_0[2]; + + /* Transition condition */ + if ((b > a)) { + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + /* Permutation for Place P2 : 1, 3, 0, 2 */ + /* Transition T2 */ + if ( (P2_is_marked >= 4) && + ((P3_is_marked + 3) <= 6) && + ( (P2_marking_member_0[3] == P2_marking_member_0[0]) && + (P2_marking_member_0[3] == P2_marking_member_0[2]) ) ) { + long a; + long b; + long c; + + a = P2_marking_member_0[1]; + b = P2_marking_member_0[3]; + + /* Transition condition */ + if ((b > a)) { + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 1, 3, 2, 0 */ + /* Transition T2 */ + if ( (P2_is_marked >= 4) && + ((P3_is_marked + 3) <= 6) && + ( (P2_marking_member_0[3] == P2_marking_member_0[2]) && + (P2_marking_member_0[3] == P2_marking_member_0[0]) ) ) { + long a; + long b; + long c; + + a = P2_marking_member_0[1]; + b = P2_marking_member_0[3]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 2, 0, 1, 3 */ + /* Transition T2 */ + if ( (P2_is_marked >= 4) && + ((P3_is_marked + 3) <= 6) && + ( (P2_marking_member_0[0] == P2_marking_member_0[1]) && + (P2_marking_member_0[0] == P2_marking_member_0[3]) ) ) { + long a; + long b; + long c; + + a = P2_marking_member_0[2]; + b = P2_marking_member_0[0]; + + /* Transition condition */ + if ((b > a)) { + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) 
*/ + } + + /* Permutation for Place P2 : 2, 0, 3, 1 */ + /* Transition T2 */ + if ( (P2_is_marked >= 4) && + ((P3_is_marked + 3) <= 6) && + ( (P2_marking_member_0[0] == P2_marking_member_0[3]) && + (P2_marking_member_0[0] == P2_marking_member_0[1]) ) ) { + long a; + long b; + long c; + + a = P2_marking_member_0[2]; + b = P2_marking_member_0[0]; + + /* Transition condition */ + if ((b > a)) { + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + /* Permutation for Place P2 : 2, 1, 0, 3 */ + /* Transition T2 */ + if ( (P2_is_marked >= 4) && + ((P3_is_marked + 3) <= 6) && + ( (P2_marking_member_0[1] == P2_marking_member_0[0]) && + (P2_marking_member_0[1] == P2_marking_member_0[3]) ) ) { + long a; + long b; + long c; + + a = P2_marking_member_0[2]; + b = P2_marking_member_0[1]; + + /* Transition condition */ + if ((b > a)) { + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + /* Permutation for Place P2 : 2, 1, 3, 0 */ + /* Transition T2 */ + if ( (P2_is_marked >= 4) && + ((P3_is_marked + 3) <= 6) && + ( (P2_marking_member_0[1] == P2_marking_member_0[3]) && + (P2_marking_member_0[1] == P2_marking_member_0[0]) ) ) { + long a; + long b; + long c; + + a = P2_marking_member_0[2]; + b = P2_marking_member_0[1]; + + /* Transition condition */ + if ((b > a)) { + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + /* Permutation for Place P2 : 2, 3, 0, 1 */ + /* Transition T2 */ + if ( (P2_is_marked >= 4) && + ((P3_is_marked + 3) <= 6) && + ( (P2_marking_member_0[3] == P2_marking_member_0[0]) && + (P2_marking_member_0[3] == P2_marking_member_0[1]) ) ) { + long a; + long b; + long c; + + a = P2_marking_member_0[2]; + b = P2_marking_member_0[3]; + + /* Transition condition */ + if ((b > a)) { + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + /* Permutation for Place P2 : 2, 3, 1, 0 */ + /* Transition T2 */ + if ( (P2_is_marked >= 4) && + ((P3_is_marked + 3) <= 6) && + ( (P2_marking_member_0[3] == P2_marking_member_0[1]) && + (P2_marking_member_0[3] == P2_marking_member_0[0]) ) ) { + long a; + long b; + long c; + + a = P2_marking_member_0[2]; + b = P2_marking_member_0[3]; + + /* Transition condition */ + if ((b > a)) { + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + 
} + + /* Permutation for Place P2 : 3, 0, 1, 2 */ + /* Transition T2 */ + if ( (P2_is_marked >= 4) && + ((P3_is_marked + 3) <= 6) && + ( (P2_marking_member_0[0] == P2_marking_member_0[1]) && + (P2_marking_member_0[0] == P2_marking_member_0[2]) ) ) { + long a; + long b; + long c; + + a = P2_marking_member_0[3]; + b = P2_marking_member_0[0]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 3, 0, 2, 1 */ + /* Transition T2 */ + if ( (P2_is_marked >= 4) && + ( (P3_is_marked + 3) <= 6) && + ( ( P2_marking_member_0[0] == P2_marking_member_0[2]) && + ( P2_marking_member_0[0] == P2_marking_member_0[1]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[3]; + b = P2_marking_member_0[0]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 3, 1, 0, 2 */ + /* Transition T2 */ + if ( (P2_is_marked >= 4) && + ( (P3_is_marked + 3) <= 6) && + ( ( P2_marking_member_0[1] == P2_marking_member_0[0]) && + ( P2_marking_member_0[1] == P2_marking_member_0[2]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[3]; + b = P2_marking_member_0[1]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 3, 1, 2, 0 */ + /* Transition T2 */ + if ( (P2_is_marked >= 4) && + ( (P3_is_marked + 3) <= 6) && + ( ( P2_marking_member_0[1] == P2_marking_member_0[2]) && + ( P2_marking_member_0[1] == P2_marking_member_0[0]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[3]; + b = P2_marking_member_0[1]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 3, 2, 0, 1 */ + /* Transition T2 */ + if ( (P2_is_marked >= 4) && + ( (P3_is_marked + 3) <= 6) && + ( ( P2_marking_member_0[2] == P2_marking_member_0[0]) && + ( P2_marking_member_0[2] == P2_marking_member_0[1]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[3]; + b = P2_marking_member_0[2]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end 
of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 3, 2, 1, 0 */ + /* Transition T2 */ + if ( (P2_is_marked >= 4) && + ( (P3_is_marked + 3) <= 6) && + ( ( P2_marking_member_0[2] == P2_marking_member_0[1]) && + ( P2_marking_member_0[2] == P2_marking_member_0[0]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[3]; + b = P2_marking_member_0[2]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 0, 1, 2, 4 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + ( ( P2_marking_member_0[1] == P2_marking_member_0[2]) && + ( P2_marking_member_0[1] == P2_marking_member_0[4]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[0]; + b = P2_marking_member_0[1]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[3]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 0, 1, 3, 4 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + ( ( P2_marking_member_0[1] == P2_marking_member_0[3]) && + ( P2_marking_member_0[1] == P2_marking_member_0[4]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[0]; + b = P2_marking_member_0[1]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[2]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 0, 1, 4, 2 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + ( ( P2_marking_member_0[1] == P2_marking_member_0[4]) && + ( P2_marking_member_0[1] == P2_marking_member_0[2]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[0]; + b = P2_marking_member_0[1]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[3]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 0, 1, 4, 3 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + ( ( P2_marking_member_0[1] == P2_marking_member_0[4]) && + ( P2_marking_member_0[1] == P2_marking_member_0[3]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[0]; + b = P2_marking_member_0[1]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[2]; + 
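+          /* the copy above keeps the one P2 token that is not consumed by this
+             firing in slot 0 before the marking count is reduced. */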
P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 0, 2, 1, 4 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + ( ( P2_marking_member_0[2] == P2_marking_member_0[1]) && + ( P2_marking_member_0[2] == P2_marking_member_0[4]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[0]; + b = P2_marking_member_0[2]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[3]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 0, 2, 3, 4 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + ( ( P2_marking_member_0[2] == P2_marking_member_0[3]) && + ( P2_marking_member_0[2] == P2_marking_member_0[4]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[0]; + b = P2_marking_member_0[2]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[1]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 0, 2, 4, 1 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + ( ( P2_marking_member_0[2] == P2_marking_member_0[4]) && + ( P2_marking_member_0[2] == P2_marking_member_0[1]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[0]; + b = P2_marking_member_0[2]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[3]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 0, 2, 4, 3 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + ( ( P2_marking_member_0[2] == P2_marking_member_0[4]) && + ( P2_marking_member_0[2] == P2_marking_member_0[3]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[0]; + b = P2_marking_member_0[2]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[1]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 0, 3, 1, 4 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + ( ( P2_marking_member_0[3] == 
P2_marking_member_0[1]) && + ( P2_marking_member_0[3] == P2_marking_member_0[4]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[0]; + b = P2_marking_member_0[3]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[2]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 0, 3, 2, 4 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + ( ( P2_marking_member_0[3] == P2_marking_member_0[2]) && + ( P2_marking_member_0[3] == P2_marking_member_0[4]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[0]; + b = P2_marking_member_0[3]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[1]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 0, 3, 4, 1 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + ( ( P2_marking_member_0[3] == P2_marking_member_0[4]) && + ( P2_marking_member_0[3] == P2_marking_member_0[1]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[0]; + b = P2_marking_member_0[3]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[2]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 0, 3, 4, 2 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + ( ( P2_marking_member_0[3] == P2_marking_member_0[4]) && + ( P2_marking_member_0[3] == P2_marking_member_0[2]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[0]; + b = P2_marking_member_0[3]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[1]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 0, 4, 1, 2 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + ( ( P2_marking_member_0[4] == P2_marking_member_0[1]) && + ( P2_marking_member_0[4] == P2_marking_member_0[2]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[0]; + b = P2_marking_member_0[4]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[3]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + 
P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 0, 4, 1, 3 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + ( ( P2_marking_member_0[4] == P2_marking_member_0[1]) && + ( P2_marking_member_0[4] == P2_marking_member_0[3]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[0]; + b = P2_marking_member_0[4]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[2]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 0, 4, 2, 1 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + ( ( P2_marking_member_0[4] == P2_marking_member_0[2]) && + ( P2_marking_member_0[4] == P2_marking_member_0[1]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[0]; + b = P2_marking_member_0[4]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[3]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 0, 4, 2, 3 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + ( ( P2_marking_member_0[4] == P2_marking_member_0[2]) && + ( P2_marking_member_0[4] == P2_marking_member_0[3]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[0]; + b = P2_marking_member_0[4]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[1]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 0, 4, 3, 1 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + ( ( P2_marking_member_0[4] == P2_marking_member_0[3]) && + ( P2_marking_member_0[4] == P2_marking_member_0[1]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[0]; + b = P2_marking_member_0[4]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[2]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 0, 4, 3, 2 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + ( ( P2_marking_member_0[4] == P2_marking_member_0[3]) && + ( P2_marking_member_0[4] == P2_marking_member_0[2]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[0]; + b 
= P2_marking_member_0[4]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[1]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 1, 0, 2, 4 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[0] == P2_marking_member_0[2]) && + ( P2_marking_member_0[0] == P2_marking_member_0[4]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[1]; + b = P2_marking_member_0[0]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[3]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 1, 0, 3, 4 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[0] == P2_marking_member_0[3]) && + ( P2_marking_member_0[0] == P2_marking_member_0[4]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[1]; + b = P2_marking_member_0[0]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[2]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 1, 0, 4, 2 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[0] == P2_marking_member_0[4]) && + ( P2_marking_member_0[0] == P2_marking_member_0[2]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[1]; + b = P2_marking_member_0[0]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[3]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 1, 0, 4, 3 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[0] == P2_marking_member_0[4]) && + ( P2_marking_member_0[0] == P2_marking_member_0[3]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[1]; + b = P2_marking_member_0[0]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[2]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for 
Place P2 : 1, 2, 0, 4 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[2] == P2_marking_member_0[0]) && + ( P2_marking_member_0[2] == P2_marking_member_0[4]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[1]; + b = P2_marking_member_0[2]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[3]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 1, 2, 3, 4 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[2] == P2_marking_member_0[3]) && + ( P2_marking_member_0[2] == P2_marking_member_0[4]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[1]; + b = P2_marking_member_0[2]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 1, 2, 4, 0 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[2] == P2_marking_member_0[4]) && + ( P2_marking_member_0[2] == P2_marking_member_0[0]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[1]; + b = P2_marking_member_0[2]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[3]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 1, 2, 4, 3 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[2] == P2_marking_member_0[4]) && + ( P2_marking_member_0[2] == P2_marking_member_0[3]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[1]; + b = P2_marking_member_0[2]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 1, 3, 0, 4 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[3] == P2_marking_member_0[0]) && + ( P2_marking_member_0[3] == P2_marking_member_0[4]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[1]; + b = P2_marking_member_0[3]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[2]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + 
P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 1, 3, 2, 4 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[3] == P2_marking_member_0[2]) && + ( P2_marking_member_0[3] == P2_marking_member_0[4]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[1]; + b = P2_marking_member_0[3]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 1, 3, 4, 0 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[3] == P2_marking_member_0[4]) && + ( P2_marking_member_0[3] == P2_marking_member_0[0]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[1]; + b = P2_marking_member_0[3]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[2]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 1, 3, 4, 2 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[3] == P2_marking_member_0[4]) && + ( P2_marking_member_0[3] == P2_marking_member_0[2]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[1]; + b = P2_marking_member_0[3]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 1, 4, 0, 2 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[4] == P2_marking_member_0[0]) && + ( P2_marking_member_0[4] == P2_marking_member_0[2]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[1]; + b = P2_marking_member_0[4]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[3]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 1, 4, 0, 3 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[4] == P2_marking_member_0[0]) && + ( P2_marking_member_0[4] == P2_marking_member_0[3]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[1]; + b = P2_marking_member_0[4]; + + /* Transition condition */ + if 
((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[2]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 1, 4, 2, 0 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[4] == P2_marking_member_0[2]) && + ( P2_marking_member_0[4] == P2_marking_member_0[0]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[1]; + b = P2_marking_member_0[4]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[3]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 1, 4, 2, 3 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[4] == P2_marking_member_0[2]) && + ( P2_marking_member_0[4] == P2_marking_member_0[3]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[1]; + b = P2_marking_member_0[4]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 1, 4, 3, 0 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[4] == P2_marking_member_0[3]) && + ( P2_marking_member_0[4] == P2_marking_member_0[0]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[1]; + b = P2_marking_member_0[4]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[2]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 1, 4, 3, 2 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[4] == P2_marking_member_0[3]) && + ( P2_marking_member_0[4] == P2_marking_member_0[2]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[1]; + b = P2_marking_member_0[4]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 2, 0, 1, 4 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[0] == P2_marking_member_0[1]) && + 
( P2_marking_member_0[0] == P2_marking_member_0[4]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[2]; + b = P2_marking_member_0[0]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[3]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 2, 0, 3, 4 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[0] == P2_marking_member_0[3]) && + ( P2_marking_member_0[0] == P2_marking_member_0[4]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[2]; + b = P2_marking_member_0[0]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[1]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 2, 0, 4, 1 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[0] == P2_marking_member_0[4]) && + ( P2_marking_member_0[0] == P2_marking_member_0[1]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[2]; + b = P2_marking_member_0[0]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[3]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 2, 0, 4, 3 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[0] == P2_marking_member_0[4]) && + ( P2_marking_member_0[0] == P2_marking_member_0[3]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[2]; + b = P2_marking_member_0[0]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[1]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 2, 1, 0, 4 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[1] == P2_marking_member_0[0]) && + ( P2_marking_member_0[1] == P2_marking_member_0[4]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[2]; + b = P2_marking_member_0[1]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[3]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + 
P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 2, 1, 3, 4 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[1] == P2_marking_member_0[3]) && + ( P2_marking_member_0[1] == P2_marking_member_0[4]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[2]; + b = P2_marking_member_0[1]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 2, 1, 4, 0 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[1] == P2_marking_member_0[4]) && + ( P2_marking_member_0[1] == P2_marking_member_0[0]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[2]; + b = P2_marking_member_0[1]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[3]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 2, 1, 4, 3 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[1] == P2_marking_member_0[4]) && + ( P2_marking_member_0[1] == P2_marking_member_0[3]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[2]; + b = P2_marking_member_0[1]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 2, 3, 0, 4 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[3] == P2_marking_member_0[0]) && + ( P2_marking_member_0[3] == P2_marking_member_0[4]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[2]; + b = P2_marking_member_0[3]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[1]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 2, 3, 1, 4 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[3] == P2_marking_member_0[1]) && + ( P2_marking_member_0[3] == P2_marking_member_0[4]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[2]; + b = P2_marking_member_0[3]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction 
*/ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 2, 3, 4, 0 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[3] == P2_marking_member_0[4]) && + ( P2_marking_member_0[3] == P2_marking_member_0[0]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[2]; + b = P2_marking_member_0[3]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[1]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 2, 3, 4, 1 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[3] == P2_marking_member_0[4]) && + ( P2_marking_member_0[3] == P2_marking_member_0[1]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[2]; + b = P2_marking_member_0[3]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 2, 4, 0, 1 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[4] == P2_marking_member_0[0]) && + ( P2_marking_member_0[4] == P2_marking_member_0[1]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[2]; + b = P2_marking_member_0[4]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[3]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 2, 4, 0, 3 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[4] == P2_marking_member_0[0]) && + ( P2_marking_member_0[4] == P2_marking_member_0[3]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[2]; + b = P2_marking_member_0[4]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[1]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 2, 4, 1, 0 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[4] == P2_marking_member_0[1]) && + ( P2_marking_member_0[4] == P2_marking_member_0[0]) ) ) { + + long a; + long b; + 
long c; + + a = P2_marking_member_0[2]; + b = P2_marking_member_0[4]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[3]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 2, 4, 1, 3 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[4] == P2_marking_member_0[1]) && + ( P2_marking_member_0[4] == P2_marking_member_0[3]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[2]; + b = P2_marking_member_0[4]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 2, 4, 3, 0 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[4] == P2_marking_member_0[3]) && + ( P2_marking_member_0[4] == P2_marking_member_0[0]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[2]; + b = P2_marking_member_0[4]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[1]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 2, 4, 3, 1 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[4] == P2_marking_member_0[3]) && + ( P2_marking_member_0[4] == P2_marking_member_0[1]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[2]; + b = P2_marking_member_0[4]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 3, 0, 1, 4 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[0] == P2_marking_member_0[1]) && + ( P2_marking_member_0[0] == P2_marking_member_0[4]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[3]; + b = P2_marking_member_0[0]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[2]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 3, 0, 2, 4 */ + /* Transition T2 */ + if ( 
(P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[0] == P2_marking_member_0[2]) && + ( P2_marking_member_0[0] == P2_marking_member_0[4]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[3]; + b = P2_marking_member_0[0]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[1]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 3, 0, 4, 1 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[0] == P2_marking_member_0[4]) && + ( P2_marking_member_0[0] == P2_marking_member_0[1]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[3]; + b = P2_marking_member_0[0]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[2]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 3, 0, 4, 2 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[0] == P2_marking_member_0[4]) && + ( P2_marking_member_0[0] == P2_marking_member_0[2]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[3]; + b = P2_marking_member_0[0]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[1]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 3, 1, 0, 4 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[1] == P2_marking_member_0[0]) && + ( P2_marking_member_0[1] == P2_marking_member_0[4]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[3]; + b = P2_marking_member_0[1]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[2]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 3, 1, 2, 4 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[1] == P2_marking_member_0[2]) && + ( P2_marking_member_0[1] == P2_marking_member_0[4]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[3]; + b = P2_marking_member_0[1]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] 
= a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 3, 1, 4, 0 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[1] == P2_marking_member_0[4]) && + ( P2_marking_member_0[1] == P2_marking_member_0[0]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[3]; + b = P2_marking_member_0[1]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[2]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 3, 1, 4, 2 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[1] == P2_marking_member_0[4]) && + ( P2_marking_member_0[1] == P2_marking_member_0[2]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[3]; + b = P2_marking_member_0[1]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 3, 2, 0, 4 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[2] == P2_marking_member_0[0]) && + ( P2_marking_member_0[2] == P2_marking_member_0[4]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[3]; + b = P2_marking_member_0[2]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[1]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 3, 2, 1, 4 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[2] == P2_marking_member_0[1]) && + ( P2_marking_member_0[2] == P2_marking_member_0[4]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[3]; + b = P2_marking_member_0[2]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 3, 2, 4, 0 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[2] == P2_marking_member_0[4]) && + ( P2_marking_member_0[2] == P2_marking_member_0[0]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[3]; + b = P2_marking_member_0[2]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input 
places */ + P2_marking_member_0[0] = P2_marking_member_0[1]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 3, 2, 4, 1 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[2] == P2_marking_member_0[4]) && + ( P2_marking_member_0[2] == P2_marking_member_0[1]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[3]; + b = P2_marking_member_0[2]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 3, 4, 0, 1 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[4] == P2_marking_member_0[0]) && + ( P2_marking_member_0[4] == P2_marking_member_0[1]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[3]; + b = P2_marking_member_0[4]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[2]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 3, 4, 0, 2 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[4] == P2_marking_member_0[0]) && + ( P2_marking_member_0[4] == P2_marking_member_0[2]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[3]; + b = P2_marking_member_0[4]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[1]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 3, 4, 1, 0 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[4] == P2_marking_member_0[1]) && + ( P2_marking_member_0[4] == P2_marking_member_0[0]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[3]; + b = P2_marking_member_0[4]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[2]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 3, 4, 1, 2 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[4] == 
P2_marking_member_0[1]) && + ( P2_marking_member_0[4] == P2_marking_member_0[2]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[3]; + b = P2_marking_member_0[4]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 3, 4, 2, 0 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[4] == P2_marking_member_0[2]) && + ( P2_marking_member_0[4] == P2_marking_member_0[0]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[3]; + b = P2_marking_member_0[4]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[1]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 3, 4, 2, 1 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[4] == P2_marking_member_0[2]) && + ( P2_marking_member_0[4] == P2_marking_member_0[1]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[3]; + b = P2_marking_member_0[4]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 4, 0, 1, 2 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[0] == P2_marking_member_0[1]) && + ( P2_marking_member_0[0] == P2_marking_member_0[2]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[4]; + b = P2_marking_member_0[0]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[3]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 4, 0, 1, 3 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[0] == P2_marking_member_0[1]) && + ( P2_marking_member_0[0] == P2_marking_member_0[3]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[4]; + b = P2_marking_member_0[0]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[2]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if 
(Transition condition) */ + } + + + /* Permutation for Place P2 : 4, 0, 2, 1 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[0] == P2_marking_member_0[2]) && + ( P2_marking_member_0[0] == P2_marking_member_0[1]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[4]; + b = P2_marking_member_0[0]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[3]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 4, 0, 2, 3 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[0] == P2_marking_member_0[2]) && + ( P2_marking_member_0[0] == P2_marking_member_0[3]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[4]; + b = P2_marking_member_0[0]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[1]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 4, 0, 3, 1 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[0] == P2_marking_member_0[3]) && + ( P2_marking_member_0[0] == P2_marking_member_0[1]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[4]; + b = P2_marking_member_0[0]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[2]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 4, 0, 3, 2 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[0] == P2_marking_member_0[3]) && + ( P2_marking_member_0[0] == P2_marking_member_0[2]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[4]; + b = P2_marking_member_0[0]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[1]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 4, 1, 0, 2 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[1] == P2_marking_member_0[0]) && + ( P2_marking_member_0[1] == P2_marking_member_0[2]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[4]; + b = P2_marking_member_0[1]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + 
P2_marking_member_0[0] = P2_marking_member_0[3]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 4, 1, 0, 3 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[1] == P2_marking_member_0[0]) && + ( P2_marking_member_0[1] == P2_marking_member_0[3]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[4]; + b = P2_marking_member_0[1]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[2]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 4, 1, 2, 0 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[1] == P2_marking_member_0[2]) && + ( P2_marking_member_0[1] == P2_marking_member_0[0]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[4]; + b = P2_marking_member_0[1]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[3]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 4, 1, 2, 3 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[1] == P2_marking_member_0[2]) && + ( P2_marking_member_0[1] == P2_marking_member_0[3]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[4]; + b = P2_marking_member_0[1]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 4, 1, 3, 0 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[1] == P2_marking_member_0[3]) && + ( P2_marking_member_0[1] == P2_marking_member_0[0]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[4]; + b = P2_marking_member_0[1]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[2]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 4, 1, 3, 2 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[1] == P2_marking_member_0[3]) && 
+ ( P2_marking_member_0[1] == P2_marking_member_0[2]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[4]; + b = P2_marking_member_0[1]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 4, 2, 0, 1 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[2] == P2_marking_member_0[0]) && + ( P2_marking_member_0[2] == P2_marking_member_0[1]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[4]; + b = P2_marking_member_0[2]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[3]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 4, 2, 0, 3 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[2] == P2_marking_member_0[0]) && + ( P2_marking_member_0[2] == P2_marking_member_0[3]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[4]; + b = P2_marking_member_0[2]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[1]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 4, 2, 1, 0 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[2] == P2_marking_member_0[1]) && + ( P2_marking_member_0[2] == P2_marking_member_0[0]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[4]; + b = P2_marking_member_0[2]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[3]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 4, 2, 1, 3 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[2] == P2_marking_member_0[1]) && + ( P2_marking_member_0[2] == P2_marking_member_0[3]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[4]; + b = P2_marking_member_0[2]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + 
+ /* Permutation for Place P2 : 4, 2, 3, 0 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[2] == P2_marking_member_0[3]) && + ( P2_marking_member_0[2] == P2_marking_member_0[0]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[4]; + b = P2_marking_member_0[2]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[1]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 4, 2, 3, 1 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[2] == P2_marking_member_0[3]) && + ( P2_marking_member_0[2] == P2_marking_member_0[1]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[4]; + b = P2_marking_member_0[2]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 4, 3, 0, 1 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[3] == P2_marking_member_0[0]) && + ( P2_marking_member_0[3] == P2_marking_member_0[1]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[4]; + b = P2_marking_member_0[3]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[2]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 4, 3, 0, 2 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[3] == P2_marking_member_0[0]) && + ( P2_marking_member_0[3] == P2_marking_member_0[2]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[4]; + b = P2_marking_member_0[3]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[1]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 4, 3, 1, 0 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[3] == P2_marking_member_0[1]) && + ( P2_marking_member_0[3] == P2_marking_member_0[0]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[4]; + b = P2_marking_member_0[3]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[2]; + P2_is_marked -= 4; + + /* preaction */ + c = a + 
b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 4, 3, 1, 2 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[3] == P2_marking_member_0[1]) && + ( P2_marking_member_0[3] == P2_marking_member_0[2]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[4]; + b = P2_marking_member_0[3]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 4, 3, 2, 0 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[3] == P2_marking_member_0[2]) && + ( P2_marking_member_0[3] == P2_marking_member_0[0]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[4]; + b = P2_marking_member_0[3]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_marking_member_0[0] = P2_marking_member_0[1]; + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + + + /* Permutation for Place P2 : 4, 3, 2, 1 */ + /* Transition T2 */ + if ( (P2_is_marked >= 5) && + ( (P3_is_marked + 3) <= 6) && + (( P2_marking_member_0[3] == P2_marking_member_0[2]) && + ( P2_marking_member_0[3] == P2_marking_member_0[1]) ) ) { + + long a; + long b; + long c; + + a = P2_marking_member_0[4]; + b = P2_marking_member_0[3]; + + /* Transition condition */ + if ((b > a)) { + + /* demarking of input places */ + P2_is_marked -= 4; + + /* preaction */ + c = a + b; + + /* marking of output places */ + P3_marking_member_0[P3_is_marked+0] = a; + P3_marking_member_0[P3_is_marked+1] = b; + P3_marking_member_0[P3_is_marked+2] = c; + P3_is_marked += 3; + + } /* end of if (Transition condition) */ + } + } + + + dummy_i = 77; + + return dummy_i; + + +} + +/*************************************************************************** + * + * end of file + * + ***************************************************************************/ + diff --git a/test/src/prime.c b/test/src/prime.c new file mode 100755 index 0000000..1c204ad --- /dev/null +++ b/test/src/prime.c @@ -0,0 +1,46 @@ +/* MDH WCET BENCHMARK SUITE. */ + +/* Changes: + * JG 2005/12/08: Prototypes added, and changed exit to return in main. 
+ */ + +typedef unsigned char bool; +typedef unsigned int uint; + +bool divides (uint n, uint m); +bool even (uint n); +bool prime (uint n); +void swap (uint* a, uint* b); + +bool divides (uint n, uint m) { + return (m % n == 0); +} + +bool even (uint n) { + return (divides (2, n)); +} + +bool prime (uint n) { + uint i; + if (even (n)) + return (n == 2); + for (i = 3; i * i <= n; i += 2) { + if (divides (i, n)) /* ai: loop here min 0 max 357 end; */ + return 0; + } + return (n > 1); +} + +void swap (uint* a, uint* b) { + uint tmp = *a; + *a = *b; + *b = tmp; +} + +int main () { + uint x = 21649; + uint y = 513239; + swap (&x, &y); + return (!(prime(x) && prime(y))); +} + diff --git a/test/src/qsort-exam.c b/test/src/qsort-exam.c new file mode 100755 index 0000000..edbbc2a --- /dev/null +++ b/test/src/qsort-exam.c @@ -0,0 +1,119 @@ +/*************************************************************************/ +/* */ +/* SNU-RT Benchmark Suite for Worst Case Timing Analysis */ +/* ===================================================== */ +/* Collected and Modified by S.-S. Lim */ +/* sslim@archi.snu.ac.kr */ +/* Real-Time Research Group */ +/* Seoul National University */ +/* */ +/* */ +/* < Features > - restrictions for our experimental environment */ +/* */ +/* 1. Completely structured. */ +/* - There are no unconditional jumps. */ +/* - There are no exit from loop bodies. */ +/* (There are no 'break' or 'return' in loop bodies) */ +/* 2. No 'switch' statements. */ +/* 3. No 'do..while' statements. */ +/* 4. Expressions are restricted. */ +/* - There are no multiple expressions joined by 'or', */ +/* 'and' operations. */ +/* 5. No library calls. */ +/* - All the functions needed are implemented in the */ +/* source file. */ +/* */ +/* */ +/*************************************************************************/ +/* */ +/* FILE: qsort-exam.c */ +/* SOURCE : Numerical Recipes in C - The Second Edition */ +/* */ +/* DESCRIPTION : */ +/* */ +/* Non-recursive version of quick sort algorithm. */ +/* This example sorts 20 floating point numbers, arr[]. 
*/ +/* */ +/* REMARK : */ +/* */ +/* EXECUTION TIME : */ +/* */ +/* */ +/*************************************************************************/ + + +#define SWAP(a,b) temp=(a);(a)=(b);(b)=temp; +#define M 7 +#define NSTACK 50 + +float arr[20] = { + 5, 4, 10.3, 1.1, 5.7, 100, 231, 111, 49.5, 99, + 10, 150, 222.22, 101, 77, 44, 35, 20.54, 99.99, 88.88 +}; + +int istack[100]; + +void sort(unsigned long n) +{ + unsigned long i,ir=n,j,k,l=1; + int jstack=0; + int flag; + float a,temp; + + flag = 0; + for (;;) { + if (ir-l < M) { + for (j=l+1;j<=ir;j++) { + a=arr[j]; + for (i=j-1;i>=l;i--) { + if (arr[i] <= a) break; + arr[i+1]=arr[i]; + } + arr[i+1]=a; + } + if (jstack == 0) break; + ir=istack[jstack--]; + l=istack[jstack--]; + } else { + k=(l+ir) >> 1; + SWAP(arr[k],arr[l+1]) + if (arr[l] > arr[ir]) { + SWAP(arr[l],arr[ir]) + } + if (arr[l+1] > arr[ir]) { + SWAP(arr[l+1],arr[ir]) + } + if (arr[l] > arr[l+1]) { + SWAP(arr[l],arr[l+1]) + } + i=l+1; + j=ir; + a=arr[l+1]; + for (;;) { + i++; while (arr[i] < a) i++; + j--; while (arr[j] > a) j--; + if (j < i) break; + SWAP(arr[i],arr[j]); + } + arr[l+1]=arr[j]; + arr[j]=a; + jstack += 2; + + if (ir-i+1 >= j-l) { + istack[jstack]=ir; + istack[jstack-1]=i; + ir=j-1; + } else { + istack[jstack]=j-1; + istack[jstack-1]=l; + l=i; + } + } + } +} + +main() +{ + sort(20); +} + diff --git a/test/src/qurt.c b/test/src/qurt.c new file mode 100755 index 0000000..db4f1fc --- /dev/null +++ b/test/src/qurt.c @@ -0,0 +1,173 @@ +/* MDH WCET BENCHMARK SUITE. */ +/* 2012/09/28, Jan Gustafsson + * Changes: + * - This program redefines the C standard function fabs and sqrt. Therefore, these functions has been renamed with prefix qurt_. + * - qurt.c:95:6: warning: explicitly assigning a variable of type 'double' to itself: fixed + * - qurt.c:105:6: warning: unused variable 'flag' [-Wunused-variable]: fixed + */ + +/*************************************************************************/ +/* */ +/* SNU-RT Benchmark Suite for Worst Case Timing Analysis */ +/* ===================================================== */ +/* Collected and Modified by S.-S. Lim */ +/* sslim@archi.snu.ac.kr */ +/* Real-Time Research Group */ +/* Seoul National University */ +/* */ +/* */ +/* < Features > - restrictions for our experimental environment */ +/* */ +/* 1. Completely structured. */ +/* - There are no unconditional jumps. */ +/* - There are no exit from loop bodies. */ +/* (There are no 'break' or 'return' in loop bodies) */ +/* 2. No 'switch' statements. */ +/* 3. No 'do..while' statements. */ +/* 4. Expressions are restricted. */ +/* - There are no multiple expressions joined by 'or', */ +/* 'and' operations. */ +/* 5. No library calls. */ +/* - All the functions needed are implemented in the */ +/* source file. */ +/* */ +/* */ +/*************************************************************************/ +/* */ +/* FILE: qurt.c */ +/* SOURCE : Turbo C Programming for Engineering by Hyun Soo Ahn */ +/* */ +/* DESCRIPTION : */ +/* */ +/* Root computation of quadratic equations. */ +/* The real and imaginary parts of the solution are stored in the */ +/* array x1[] and x2[]. */ +/* */ +/* REMARK : */ +/* */ +/* EXECUTION TIME : */ +/* */ +/* */ +/*************************************************************************/ + + + +/* +** Benchmark Suite for Real-Time Applications, by Sung-Soo Lim +** +** III-7. 
qurt.c : the root computation of a quadratic equation +** (from the book C Programming for EEs by Hyun Soon Ahn) +*/ + + +double a[3], x1[2], x2[2]; +int flag; + +int qurt(); + + +double qurt_fabs(double n) +{ + double f; + + if (n >= 0) f = n; + else f = -n; + return f; +} + +double qurt_sqrt(val) +double val; +{ + double x = val/10; + + double dx; + + double diff; + double min_tol = 0.00001; + + int i, flag; + + flag = 0; + if (val == 0 ) x = 0; + else { + for (i=1;i<20;i++) + { + if (!flag) { + dx = (val - (x*x)) / (2.0 * x); + x = x + dx; + diff = val - (x*x); + if (qurt_fabs(diff) <= min_tol) flag = 1; + } + else {} /* JG */ +/* x =x; */ + } + } + return (x); +} + + +int main() +{ + +/* int flag; */ /* JG */ + + + a[0] = 1.0; + a[1] = -3.0; + a[2] = 2.0; + + qurt(); + + + a[0] = 1.0; + a[1] = -2.0; + a[2] = 1.0; + + qurt(); + + + a[0] = 1.0; + a[1] = -4.0; + a[2] = 8.0; + + qurt(); + return 0; +} + +int qurt() +{ + double d, w1, w2; + + if(a[0] == 0.0) return(999); + d = a[1]*a[1] - 4 * a[0] * a[2]; + w1 = 2.0 * a[0]; + w2 = qurt_sqrt(qurt_fabs(d)); + if(d > 0.0) + { + flag = 1; + x1[0] = (-a[1] + w2) / w1; + x1[1] = 0.0; + x2[0] = (-a[1] - w2) / w1; + x2[1] = 0.0; + return(0); + } + else if(d == 0.0) + { + flag = 0; + x1[0] = -a[1] / w1; + x1[1] = 0.0; + x2[0] = x1[0]; + x2[1] = 0.0; + return(0); + } + else + { + flag = -1; + w2 /= w1; + x1[0] = -a[1] / w1; + x1[1] = w2; + x2[0] = x1[0]; + x2[1] = -w2; + return(0); + } +} diff --git a/test/src/recursion.c b/test/src/recursion.c new file mode 100755 index 0000000..a5350a2 --- /dev/null +++ b/test/src/recursion.c @@ -0,0 +1,40 @@ +/* $Id: recursion.c,v 1.2 2005/04/04 11:34:58 csg Exp $ */ + +/* Generate an example of recursive code, to see * + * how it can be modeled in the scope graph. */ + +/* self-recursion */ +int fib(int i) +{ + if(i==0) + return 1; + if(i==1) + return 1; + return fib(i-1) + fib(i-2); +} + +/* mutual recursion */ +int anka(int); + +int kalle(int i) +{ + if(i<=0) + return 0; + else + return anka(i-1); +} + +int anka(int i) +{ + if(i<=0) + return 1; + else + return kalle(i-1); +} + +extern volatile int In; + +void main(void) +{ + In = fib(10); +} diff --git a/test/src/select.c b/test/src/select.c new file mode 100755 index 0000000..f709859 --- /dev/null +++ b/test/src/select.c @@ -0,0 +1,112 @@ +/*************************************************************************/ +/* */ +/* SNU-RT Benchmark Suite for Worst Case Timing Analysis */ +/* ===================================================== */ +/* Collected and Modified by S.-S. Lim */ +/* sslim@archi.snu.ac.kr */ +/* Real-Time Research Group */ +/* Seoul National University */ +/* */ +/* */ +/* < Features > - restrictions for our experimental environment */ +/* */ +/* 1. Completely structured. */ +/* - There are no unconditional jumps. */ +/* - There are no exit from loop bodies. */ +/* (There are no 'break' or 'return' in loop bodies) */ +/* 2. No 'switch' statements. */ +/* 3. No 'do..while' statements. */ +/* 4. Expressions are restricted. */ +/* - There are no multiple expressions joined by 'or', */ +/* 'and' operations. */ +/* 5. No library calls. */ +/* - All the functions needed are implemented in the */ +/* source file. */ +/* */ +/* */ +/*************************************************************************/ +/* */ +/* FILE: select.c */ +/* SOURCE : Numerical Recipes in C - The Second Edition */ +/* */ +/* DESCRIPTION : */ +/* */ +/* A function to select the Nth largest number in the floating poi- */ +/* nt array arr[]. 
*/ +/* The parameters to function select are k and n. Then the function */ +/* selects k-th largest number out of n original numbers. */ +/* */ +/* REMARK : */ +/* */ +/* EXECUTION TIME : */ +/* */ +/* */ +/*************************************************************************/ + + + + + +#define SWAP(a,b) temp=(a);(a)=(b);(b)=temp; + +float arr[20] = { + 5, 4, 10.3, 1.1, 5.7, 100, 231, 111, 49.5, 99, + 10, 150, 222.22, 101, 77, 44, 35, 20.54, 99.99, 888.88 +}; + + +float select(unsigned long k, unsigned long n) +{ + unsigned long i,ir,j,l,mid; + float a,temp; + int flag, flag2; + + l=1; + ir=n; + flag = flag2 = 0; + while (!flag) { + if (ir <= l+1) { + if (ir == l+1) + if (arr[ir] < arr[l]) { + SWAP(arr[l],arr[ir]) + } + flag = 1; + } else if (!flag) { + mid=(l+ir) >> 1; + SWAP(arr[mid],arr[l+1]) + if (arr[l+1] > arr[ir]) { + SWAP(arr[l+1],arr[ir]) + } + if (arr[l] > arr[ir]) { + SWAP(arr[l],arr[ir]) + } + if (arr[l+1]> arr[l]) { + SWAP(arr[l+1],arr[l]) + } + i=l+1; + j=ir; + a=arr[l]; + while (!flag2) { + i++; + while (arr[i] < a) i++; + j--; + while (arr[j] > a) j--; + if (j < i) flag2 = 1; + if (!flag2) SWAP(arr[i],arr[j]); + + } + arr[l]=arr[j]; + arr[j]=a; + if (j >= k) ir=j-1; + if (j <= k) l=i; + } + + } + return arr[k]; +} + +main() +{ + select(10, 20); +} + diff --git a/test/src/sqrt.c b/test/src/sqrt.c new file mode 100755 index 0000000..a2da833 --- /dev/null +++ b/test/src/sqrt.c @@ -0,0 +1,90 @@ +/* MDH WCET BENCHMARK SUITE. */ + +/* 2012/09/28, Jan Gustafsson + * Changes: + * - This program redefines the C standard function sqrt. Therefore, this function has been renamed to sqrtfcn. + * - qrt.c:79:15: warning: explicitly assigning a variable of type 'float' to itself: fixed + */ + +/*************************************************************************/ +/* */ +/* SNU-RT Benchmark Suite for Worst Case Timing Analysis */ +/* ===================================================== */ +/* Collected and Modified by S.-S. Lim */ +/* sslim@archi.snu.ac.kr */ +/* Real-Time Research Group */ +/* Seoul National University */ +/* */ +/* */ +/* < Features > - restrictions for our experimental environment */ +/* */ +/* 1. Completely structured. */ +/* - There are no unconditional jumps. */ +/* - There are no exit from loop bodies. */ +/* (There are no 'break' or 'return' in loop bodies) */ +/* 2. No 'switch' statements. */ +/* 3. No 'do..while' statements. */ +/* 4. Expressions are restricted. */ +/* - There are no multiple expressions joined by 'or', */ +/* 'and' operations. */ +/* 5. No library calls. */ +/* - All the functions needed are implemented in the */ +/* source file. */ +/* */ +/* */ +/*************************************************************************/ +/* */ +/* FILE: sqrt.c */ +/* SOURCE : Public Domain Code */ +/* */ +/* DESCRIPTION : */ +/* */ +/* Square root function implemented by Taylor series. 
*/ +/* */ +/* REMARK : */ +/* */ +/* EXECUTION TIME : */ +/* */ +/* */ +/*************************************************************************/ + + +float fabs(float x) +{ + if (x < 0) + return -x; + else + return x; +} + +float sqrtfcn(float val) +{ + float x = val/10; + + float dx; + + double diff; + double min_tol = 0.00001; + + int i, flag; + + flag = 0; + if (val == 0 ) + x = 0; + else { + for (i=1;i<20;i++) + { + if (!flag) { + dx = (val - (x*x)) / (2.0 * x); + x = x + dx; + diff = val - (x*x); + if (fabs(diff) <= min_tol) + flag = 1; + } + else {} /* JG */ +/* x =x; */ + } + } + return (x); +} + diff --git a/test/src/statemate.c b/test/src/statemate.c new file mode 100755 index 0000000..fe4ad36 --- /dev/null +++ b/test/src/statemate.c @@ -0,0 +1,1273 @@ +/* + *---------------------------------------------------------- + * + * statemate.c + * + * This code was automatically generated by + * the STAtechart Real-time-Code generator STARC + * which was developed at C-LAB. + * + * The original StateChart specifies an experimental + * car window lift control. + * + * Modified and maintained by + * Friedhelm Stappert + * C-LAB, Paderborn, Germany + * fst@c-lab.de + * + * Modifications: + * o '#define float int' so we don't need a + * floating point library. + * + * o modified the Bitlist functions. + * 'Bitlist is now just a array of char's + * so we don't need the BitList library. + * + *---------------------------------------------------------- + */ + + +/* +** actually, we don't really need floating point here +*/ +#define float int + +static char Bitlist[64]; +#define SYS_bit_get(a,b) (a)[(b)] +#define SYS_bit_clr(a,b) (a)[(b)] = 0 +#define SYS_bit_set(a,b) (a)[(b)] = 1 +#define SYS_bit_cpy(a1,i1,a2,i2) (a1)[(i1)] = (a2)[(i2)] + + +#define active_KINDERSICHERUNG_CTRL_IDX 10 +#define active_KINDERSICHERUNG_CTRL_copy_IDX 11 +#define active_KINDERSICHERUNG_CTRL_old_IDX 12 +#define active_FH_TUERMODUL_CTRL_IDX 13 +#define active_FH_TUERMODUL_CTRL_copy_IDX 14 +#define active_FH_TUERMODUL_CTRL_old_IDX 15 +#define active_EINKLEMMSCHUTZ_CTRL_IDX 16 +#define active_EINKLEMMSCHUTZ_CTRL_copy_IDX 17 +#define active_EINKLEMMSCHUTZ_CTRL_old_IDX 18 +#define active_BLOCK_ERKENNUNG_CTRL_IDX 19 +#define active_BLOCK_ERKENNUNG_CTRL_copy_IDX 20 +#define active_BLOCK_ERKENNUNG_CTRL_old_IDX 21 +#define entered_EINSCHALTSTROM_MESSEN_BLOCK_ERKENNUNG_CTRL_IDX 0 +#define entered_EINSCHALTSTROM_MESSEN_BLOCK_ERKENNUNG_CTRL_copy_IDX 1 +unsigned long tm_entered_EINSCHALTSTROM_MESSEN_BLOCK_ERKENNUNG_CTRLch_BLOCK_ERKENNUNG_CTRL__N_copy; +#define entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRL_IDX 4 +#define entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRL_copy_IDX 5 +#define exited_BEREIT_FH_TUERMODUL_CTRL_IDX 6 +#define exited_BEREIT_FH_TUERMODUL_CTRL_copy_IDX 7 +unsigned long tm_entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRLexited_BEREIT_FH_TUERMODUL_CTRL; +unsigned long tm_entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRL; +unsigned long sc_FH_TUERMODUL_CTRL_2375_2 ; +unsigned long sc_FH_TUERMODUL_CTRL_2352_1 ; +unsigned long sc_FH_TUERMODUL_CTRL_2329_1 ; +int FH_TUERMODUL_CTRL__N; +int FH_TUERMODUL_CTRL__N_copy; +int FH_TUERMODUL_CTRL__N_old; +unsigned long sc_FH_TUERMODUL_CTRL_1781_10 ; +unsigned long sc_FH_TUERMODUL_CTRL_1739_10 ; +float FH_TUERMODUL__POSITION ; +float FH_TUERMODUL__I_EIN ; +float FH_TUERMODUL__I_EIN_old; +int FH_DU__MFH; +int FH_DU__MFH_copy; +float FH_DU__POSITION ; +float FH_DU__I_EIN ; +float FH_DU__I_EIN_old; +float BLOCK_ERKENNUNG_CTRL__I_EIN_MAX; +float BLOCK_ERKENNUNG_CTRL__I_EIN_MAX_copy; +int 
BLOCK_ERKENNUNG_CTRL__N; +int BLOCK_ERKENNUNG_CTRL__N_copy; +int BLOCK_ERKENNUNG_CTRL__N_old; +char FH_TUERMODUL_CTRL__INREVERS2; +char FH_TUERMODUL_CTRL__INREVERS2_copy; +char FH_TUERMODUL_CTRL__INREVERS1; +char FH_TUERMODUL_CTRL__INREVERS1_copy; +char FH_TUERMODUL_CTRL__FT ; +char FH_TUERMODUL__SFHZ_ZENTRAL ; +char FH_TUERMODUL__SFHZ_ZENTRAL_old; +char FH_TUERMODUL__SFHZ_MEC ; +char FH_TUERMODUL__SFHZ_MEC_old; +char FH_TUERMODUL__SFHA_ZENTRAL ; +char FH_TUERMODUL__SFHA_ZENTRAL_old; +char FH_TUERMODUL__SFHA_MEC ; +char FH_TUERMODUL__SFHA_MEC_old; +char FH_TUERMODUL__KL_50 ; +char FH_TUERMODUL__BLOCK; +char FH_TUERMODUL__BLOCK_copy; +char FH_TUERMODUL__BLOCK_old; +char FH_TUERMODUL__FT ; +char FH_TUERMODUL__SFHZ; +char FH_TUERMODUL__SFHZ_copy; +char FH_TUERMODUL__SFHZ_old; +char FH_TUERMODUL__SFHA; +char FH_TUERMODUL__SFHA_copy; +char FH_TUERMODUL__SFHA_old; +char FH_TUERMODUL__MFHZ; +char FH_TUERMODUL__MFHZ_copy; +char FH_TUERMODUL__MFHZ_old; +char FH_TUERMODUL__MFHA; +char FH_TUERMODUL__MFHA_copy; +char FH_TUERMODUL__MFHA_old; +char FH_TUERMODUL__EKS_LEISTE_AKTIV ; +char FH_TUERMODUL__EKS_LEISTE_AKTIV_old; +char FH_TUERMODUL__COM_OPEN ; +char FH_TUERMODUL__COM_CLOSE ; +char FH_DU__KL_50 ; +char FH_DU__S_FH_FTZU; +char FH_DU__S_FH_FTAUF; +char FH_DU__FT ; +char FH_DU__EKS_LEISTE_AKTIV ; +char FH_DU__EKS_LEISTE_AKTIV_old; +char FH_DU__S_FH_TMBFAUFCAN; +char FH_DU__S_FH_TMBFAUFCAN_copy; +char FH_DU__S_FH_TMBFAUFCAN_old; +char FH_DU__S_FH_TMBFZUCAN; +char FH_DU__S_FH_TMBFZUCAN_copy; +char FH_DU__S_FH_TMBFZUCAN_old; +char FH_DU__S_FH_TMBFZUDISC ; +char FH_DU__S_FH_TMBFZUDISC_old; +char FH_DU__S_FH_TMBFAUFDISC ; +char FH_DU__S_FH_TMBFAUFDISC_old; +char FH_DU__S_FH_ZUDISC ; +char FH_DU__S_FH_AUFDISC ; +char FH_DU__DOOR_ID ; +char FH_DU__BLOCK; +char FH_DU__BLOCK_copy; +char FH_DU__BLOCK_old; +char FH_DU__MFHZ; +char FH_DU__MFHZ_copy; +char FH_DU__MFHZ_old; +char FH_DU__MFHA; +char FH_DU__MFHA_copy; +char FH_DU__MFHA_old; +#define FH_TUERMODUL_CTRL__END_REVERS_IDX 22 +#define FH_TUERMODUL_CTRL__END_REVERS_copy_IDX 23 +#define FH_TUERMODUL__EINKLEMMUNG_IDX 24 + +unsigned long time; +char stable; +char step; + +char NICHT_INITIALISIERT_NICHT_INITIALISIERT_next_state; /** 2 bits **/ +char ZENTRAL_KINDERSICHERUNG_CTRL_next_state; /** 1 bits **/ +char MEC_KINDERSICHERUNG_CTRL_next_state; /** 1 bits **/ +char KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state; /** 2 bits **/ +char B_FH_TUERMODUL_CTRL_next_state; /** 2 bits **/ +char A_FH_TUERMODUL_CTRL_next_state; /** 1 bits **/ +char WIEDERHOLSPERRE_FH_TUERMODUL_CTRL_next_state; /** 1 bits **/ +char INITIALISIERT_FH_TUERMODUL_CTRL_next_state; /** 2 bits **/ +char TIPP_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state; /** 2 bits **/ +char MANUELL_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state; /** 2 bits **/ +char OEFFNEN_FH_TUERMODUL_CTRL_next_state; /** 2 bits **/ +char SCHLIESSEN_FH_TUERMODUL_CTRL_next_state; /** 2 bits **/ +char FH_STEUERUNG_DUMMY_FH_STEUERUNG_DUMMY_next_state; /** 2 bits **/ +char EINKLEMMSCHUTZ_CTRL_EINKLEMMSCHUTZ_CTRL_next_state; /** 2 bits **/ +char BEWEGUNG_BLOCK_ERKENNUNG_CTRL_next_state; /** 2 bits **/ +char BLOCK_ERKENNUNG_CTRL_BLOCK_ERKENNUNG_CTRL_next_state; /** 2 bits **/ + + +void interface(void) +{ + if (SYS_bit_get(Bitlist, entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRL_IDX)) + tm_entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRL = time; + if (SYS_bit_get(Bitlist, entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRL_IDX) || SYS_bit_get (Bitlist, exited_BEREIT_FH_TUERMODUL_CTRL_IDX)) + 
tm_entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRLexited_BEREIT_FH_TUERMODUL_CTRL = time; + if ((sc_FH_TUERMODUL_CTRL_2375_2 != 0) && (time - sc_FH_TUERMODUL_CTRL_2375_2 >= 0.5)) + { + FH_TUERMODUL__MFHA_copy = 0; + sc_FH_TUERMODUL_CTRL_2375_2 = 0; + + } + if ((sc_FH_TUERMODUL_CTRL_2352_1 != 0) && (time - sc_FH_TUERMODUL_CTRL_2352_1 >= 0.5)) + { + FH_TUERMODUL__MFHZ_copy = 0; + sc_FH_TUERMODUL_CTRL_2352_1 = 0; + + } + if ((sc_FH_TUERMODUL_CTRL_2329_1 != 0) && (time - sc_FH_TUERMODUL_CTRL_2329_1 >= 0.5)) + { + FH_TUERMODUL__MFHZ_copy = 0; + sc_FH_TUERMODUL_CTRL_2329_1 = 0; + + } + if ((sc_FH_TUERMODUL_CTRL_1781_10 != 0) && (time - sc_FH_TUERMODUL_CTRL_1781_10 >= 0.5)) + { + sc_FH_TUERMODUL_CTRL_1781_10 = 0; + + } + if ((sc_FH_TUERMODUL_CTRL_1739_10 != 0) && (time - sc_FH_TUERMODUL_CTRL_1739_10 >= 0.5)) + { + sc_FH_TUERMODUL_CTRL_1739_10 = 0; + + } + if ((SYS_bit_get(Bitlist, entered_EINSCHALTSTROM_MESSEN_BLOCK_ERKENNUNG_CTRL_IDX) || BLOCK_ERKENNUNG_CTRL__N != BLOCK_ERKENNUNG_CTRL__N_old)) + tm_entered_EINSCHALTSTROM_MESSEN_BLOCK_ERKENNUNG_CTRLch_BLOCK_ERKENNUNG_CTRL__N_copy = time; + + +}/** interface **/ + + +void init(void) +{ + tm_entered_EINSCHALTSTROM_MESSEN_BLOCK_ERKENNUNG_CTRLch_BLOCK_ERKENNUNG_CTRL__N_copy = 0; + tm_entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRLexited_BEREIT_FH_TUERMODUL_CTRL = 0; + tm_entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRL = 0; + NICHT_INITIALISIERT_NICHT_INITIALISIERT_next_state = 0; + ZENTRAL_KINDERSICHERUNG_CTRL_next_state = 0; + MEC_KINDERSICHERUNG_CTRL_next_state = 0; + KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state = 0; + B_FH_TUERMODUL_CTRL_next_state = 0; + A_FH_TUERMODUL_CTRL_next_state = 0; + WIEDERHOLSPERRE_FH_TUERMODUL_CTRL_next_state = 0; + INITIALISIERT_FH_TUERMODUL_CTRL_next_state = 0; + TIPP_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state = 0; + MANUELL_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state = 0; + OEFFNEN_FH_TUERMODUL_CTRL_next_state = 0; + SCHLIESSEN_FH_TUERMODUL_CTRL_next_state = 0; + FH_STEUERUNG_DUMMY_FH_STEUERUNG_DUMMY_next_state = 0; + EINKLEMMSCHUTZ_CTRL_EINKLEMMSCHUTZ_CTRL_next_state = 0; + BEWEGUNG_BLOCK_ERKENNUNG_CTRL_next_state = 0; + BLOCK_ERKENNUNG_CTRL_BLOCK_ERKENNUNG_CTRL_next_state = 0; + + +}/** init **/ + + + +void generic_KINDERSICHERUNG_CTRL(void) +{ + if (SYS_bit_get(Bitlist,active_KINDERSICHERUNG_CTRL_IDX)) + { + switch (KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state) + { + case 1 : /** state ZENTRAL in chart KINDERSICHERUNG_CTRL **/ + { + if (!(FH_TUERMODUL__SFHA_ZENTRAL || FH_TUERMODUL__SFHZ_ZENTRAL)) + { + stable = 0; + FH_TUERMODUL__SFHZ_copy = 0; + FH_TUERMODUL__SFHA_copy = 0; + + KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state = 3; + ZENTRAL_KINDERSICHERUNG_CTRL_next_state = 0; + break; + } + switch (ZENTRAL_KINDERSICHERUNG_CTRL_next_state) + { + case 1 : /** state IN_ZENTRAL in chart KINDERSICHERUNG_CTRL **/ + { + if ((FH_TUERMODUL__SFHA_ZENTRAL && !(FH_TUERMODUL__SFHA_ZENTRAL_old))) + { + stable = 0; + FH_TUERMODUL__SFHA_copy = 1; + + ZENTRAL_KINDERSICHERUNG_CTRL_next_state = 1; + break; + } + if ((FH_TUERMODUL__SFHZ_ZENTRAL && !(FH_TUERMODUL__SFHZ_ZENTRAL_old))) + { + stable = 0; + FH_TUERMODUL__SFHZ_copy = 1; + + ZENTRAL_KINDERSICHERUNG_CTRL_next_state = 1; + break; + } + if ((!(FH_TUERMODUL__SFHA_ZENTRAL) && FH_TUERMODUL__SFHA_ZENTRAL_old)) + { + stable = 0; + FH_TUERMODUL__SFHA_copy = 0; + + ZENTRAL_KINDERSICHERUNG_CTRL_next_state = 1; + break; + } + if ((!(FH_TUERMODUL__SFHZ_ZENTRAL) && FH_TUERMODUL__SFHZ_ZENTRAL_old)) + { + stable = 0; + FH_TUERMODUL__SFHZ_copy = 0; + + ZENTRAL_KINDERSICHERUNG_CTRL_next_state = 
1; + break; + } + break; + } + default: + { + stable = 0; + break; + } + }/** switch ZENTRAL_KINDERSICHERUNG_CTRL_next_state **/ + break; + } + case 2 : /** state MEC in chart KINDERSICHERUNG_CTRL **/ + { + if (!(FH_TUERMODUL__SFHA_MEC || FH_TUERMODUL__SFHZ_MEC)) + { + stable = 0; + FH_TUERMODUL__SFHZ_copy = 0; + FH_TUERMODUL__SFHA_copy = 0; + + KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state = 3; + MEC_KINDERSICHERUNG_CTRL_next_state = 0; + break; + } + switch (MEC_KINDERSICHERUNG_CTRL_next_state) + { + case 1 : /** state INMEC in chart KINDERSICHERUNG_CTRL **/ + { + if ((FH_TUERMODUL__SFHA_MEC && !(FH_TUERMODUL__SFHA_MEC_old))) + { + stable = 0; + FH_TUERMODUL__SFHA_copy = 1; + + MEC_KINDERSICHERUNG_CTRL_next_state = 1; + break; + } + if ((FH_TUERMODUL__SFHZ_MEC && !(FH_TUERMODUL__SFHZ_MEC_old))) + { + stable = 0; + FH_TUERMODUL__SFHZ_copy = 1; + + MEC_KINDERSICHERUNG_CTRL_next_state = 1; + break; + } + if ((!(FH_TUERMODUL__SFHA_MEC) && FH_TUERMODUL__SFHA_MEC_old)) + { + stable = 0; + FH_TUERMODUL__SFHA_copy = 0; + + MEC_KINDERSICHERUNG_CTRL_next_state = 1; + break; + } + if ((!(FH_TUERMODUL__SFHZ_MEC) && FH_TUERMODUL__SFHZ_MEC_old)) + { + stable = 0; + FH_TUERMODUL__SFHZ_copy = 0; + + MEC_KINDERSICHERUNG_CTRL_next_state = 1; + break; + } + break; + } + default: + { + stable = 0; + break; + } + }/** switch MEC_KINDERSICHERUNG_CTRL_next_state **/ + break; + } + case 3 : /** state WAITING in chart KINDERSICHERUNG_CTRL **/ + { + if ((!FH_TUERMODUL__KL_50) && (FH_TUERMODUL__SFHZ_MEC && FH_TUERMODUL__SFHA_MEC)) + { + stable = 0; + FH_TUERMODUL__SFHZ_copy = 1; + FH_TUERMODUL__SFHA_copy = 1; + + KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state = 2; + break; + } + if ((!FH_TUERMODUL__KL_50) && (FH_TUERMODUL__SFHZ_MEC && !FH_TUERMODUL__SFHA_MEC)) + { + stable = 0; + FH_TUERMODUL__SFHZ_copy = 1; + + KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state = 2; + break; + } + if ((!FH_TUERMODUL__KL_50) && (!FH_TUERMODUL__SFHZ_MEC && FH_TUERMODUL__SFHA_MEC)) + { + stable = 0; + FH_TUERMODUL__SFHA_copy = 1; + + KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state = 2; + break; + } + if ((!FH_TUERMODUL__SFHZ_ZENTRAL && FH_TUERMODUL__SFHA_ZENTRAL && !FH_TUERMODUL__KL_50)) + { + stable = 0; + FH_TUERMODUL__SFHA_copy = 1; + + KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state = 1; + break; + } + if ((FH_TUERMODUL__SFHZ_ZENTRAL && FH_TUERMODUL__SFHA_ZENTRAL)) + { + stable = 0; + FH_TUERMODUL__SFHA_copy = 1; + FH_TUERMODUL__SFHZ_copy = 1; + + KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state = 1; + break; + } + if ((FH_TUERMODUL__SFHZ_ZENTRAL && !FH_TUERMODUL__SFHA_ZENTRAL && !FH_TUERMODUL__KL_50)) + { + stable = 0; + FH_TUERMODUL__SFHZ_copy = 1; + + KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state = 1; + break; + } + break; + } + default: + { + stable = 0; + KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state = 3; + break; + } + }/** switch KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state **/ + } +} + +void generic_FH_TUERMODUL_CTRL(void) +{ + if (!SYS_bit_get(Bitlist, active_FH_TUERMODUL_CTRL_IDX) && SYS_bit_get(Bitlist,active_FH_TUERMODUL_CTRL_old_IDX) && !SYS_bit_get(Bitlist,active_FH_TUERMODUL_CTRL_copy_IDX)) + { + SYS_bit_clr (Bitlist, entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRL_IDX); + SYS_bit_clr (Bitlist, exited_BEREIT_FH_TUERMODUL_CTRL_IDX); + } + if (SYS_bit_get(Bitlist,active_FH_TUERMODUL_CTRL_IDX)) + { + if (!SYS_bit_get(Bitlist, active_KINDERSICHERUNG_CTRL_IDX)) + { + KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state = 3; + } + SYS_bit_clr (Bitlist, 
active_KINDERSICHERUNG_CTRL_copy_IDX); + if (!SYS_bit_get(Bitlist, active_BLOCK_ERKENNUNG_CTRL_IDX)) + { + SYS_bit_clr (Bitlist, entered_EINSCHALTSTROM_MESSEN_BLOCK_ERKENNUNG_CTRL_IDX); + BLOCK_ERKENNUNG_CTRL_BLOCK_ERKENNUNG_CTRL_next_state = 1; + } + SYS_bit_clr (Bitlist, active_BLOCK_ERKENNUNG_CTRL_copy_IDX); + SYS_bit_set (Bitlist, active_KINDERSICHERUNG_CTRL_copy_IDX); + SYS_bit_set (Bitlist, active_BLOCK_ERKENNUNG_CTRL_copy_IDX); + switch (B_FH_TUERMODUL_CTRL_next_state) + { + case 1 : /** state ZAEHLER_WHSP_ZU_HOCH in chart FH_TUERMODUL_CTRL **/ + { + if ((FH_TUERMODUL_CTRL__N == 59 && !(FH_TUERMODUL_CTRL__N_old == 59))) + { + stable = 0; + + B_FH_TUERMODUL_CTRL_next_state = 3; + INITIALISIERT_FH_TUERMODUL_CTRL_next_state = 3; + break; + } + break; + } + case 2 : /** state NICHT_INITIALISIERT in chart FH_TUERMODUL_CTRL **/ + { + if (((FH_TUERMODUL__BLOCK && !(FH_TUERMODUL__BLOCK_old))) && ((FH_TUERMODUL__MFHZ))) + { + stable = 0; + FH_TUERMODUL__MFHZ_copy = 0; + sc_FH_TUERMODUL_CTRL_2329_1 = time; + + B_FH_TUERMODUL_CTRL_next_state = 3; + INITIALISIERT_FH_TUERMODUL_CTRL_next_state = 3; + break; + } + switch (NICHT_INITIALISIERT_NICHT_INITIALISIERT_next_state) + { + case 1 : /** state SCHLIESSEN in chart NICHT_INITIALISIERT **/ + { + if (!(FH_TUERMODUL__SFHZ)) + { + stable = 0; + FH_TUERMODUL__MFHZ_copy = 0; + + NICHT_INITIALISIERT_NICHT_INITIALISIERT_next_state = 3; + break; + } + break; + } + case 2 : /** state OEFFNEN in chart NICHT_INITIALISIERT **/ + { + if (!(FH_TUERMODUL__SFHA)) + { + stable = 0; + FH_TUERMODUL__MFHA_copy = 0; + + NICHT_INITIALISIERT_NICHT_INITIALISIERT_next_state = 3; + break; + } + break; + } + case 3 : /** state BEREIT in chart NICHT_INITIALISIERT **/ + { + if ((FH_TUERMODUL__SFHA)) + { + stable = 0; + FH_TUERMODUL__MFHA_copy = 1; + + NICHT_INITIALISIERT_NICHT_INITIALISIERT_next_state = 2; + break; + } + if ((FH_TUERMODUL__SFHZ)) + { + stable = 0; + FH_TUERMODUL__MFHZ_copy = 1; + + NICHT_INITIALISIERT_NICHT_INITIALISIERT_next_state = 1; + break; + } + break; + } + default: + { + stable = 0; + NICHT_INITIALISIERT_NICHT_INITIALISIERT_next_state = 3; + break; + } + }/** switch NICHT_INITIALISIERT_NICHT_INITIALISIERT_next_state **/ + break; + } + case 3 : /** state INITIALISIERT in chart FH_TUERMODUL_CTRL **/ + { + if (((FH_TUERMODUL_CTRL__N > 60 && !(FH_TUERMODUL_CTRL__N_old > 60))) && ((!(FH_TUERMODUL_CTRL__INREVERS1 || FH_TUERMODUL_CTRL__INREVERS2)))) + { + stable = 0; + FH_TUERMODUL__MFHZ_copy = 0; + FH_TUERMODUL__MFHA_copy = 0; + + B_FH_TUERMODUL_CTRL_next_state = 1; + break; + } + if (((FH_TUERMODUL__BLOCK && !(FH_TUERMODUL__BLOCK_old))) && ((FH_TUERMODUL__MFHA))) + { + stable = 0; + FH_TUERMODUL__MFHA_copy = 0; + sc_FH_TUERMODUL_CTRL_2375_2 = time; + + B_FH_TUERMODUL_CTRL_next_state = 2; + NICHT_INITIALISIERT_NICHT_INITIALISIERT_next_state = 3; + break; + } + if (((FH_TUERMODUL__BLOCK && !(FH_TUERMODUL__BLOCK_old))) && ((FH_TUERMODUL__MFHZ))) + { + stable = 0; + FH_TUERMODUL__MFHZ_copy = 0; + sc_FH_TUERMODUL_CTRL_2352_1 = time; + + B_FH_TUERMODUL_CTRL_next_state = 2; + NICHT_INITIALISIERT_NICHT_INITIALISIERT_next_state = 3; + break; + } + switch (INITIALISIERT_FH_TUERMODUL_CTRL_next_state) + { + case 1 : /** state OEFFNEN in chart FH_TUERMODUL_CTRL **/ + { + if ((FH_TUERMODUL__POSITION >= 405)) + { + stable = 0; + FH_TUERMODUL__MFHA_copy = 0; + + INITIALISIERT_FH_TUERMODUL_CTRL_next_state = 3; + break; + } + switch (OEFFNEN_FH_TUERMODUL_CTRL_next_state) + { + case 1 : /** state TIPP_OEFFNEN in chart FH_TUERMODUL_CTRL **/ + { + if ((FH_TUERMODUL__SFHZ && 
!(FH_TUERMODUL__SFHZ_old)) || (FH_TUERMODUL__SFHA && !(FH_TUERMODUL__SFHA_old))) + { + stable = 0; + FH_TUERMODUL__MFHA_copy = 0; + + INITIALISIERT_FH_TUERMODUL_CTRL_next_state = 3; + OEFFNEN_FH_TUERMODUL_CTRL_next_state = 0; + break; + } + break; + } + case 2 : /** state MAN_OEFFNEN in chart FH_TUERMODUL_CTRL **/ + { + if ((FH_TUERMODUL__SFHZ && !(FH_TUERMODUL__SFHZ_old))) + { + stable = 0; + + OEFFNEN_FH_TUERMODUL_CTRL_next_state = 1; + break; + } + if ((!(FH_TUERMODUL__SFHA) && FH_TUERMODUL__SFHA_old)) + { + stable = 0; + FH_TUERMODUL__MFHA_copy = 0; + + INITIALISIERT_FH_TUERMODUL_CTRL_next_state = 3; + OEFFNEN_FH_TUERMODUL_CTRL_next_state = 0; + break; + } + break; + } + default: + { + stable = 0; + OEFFNEN_FH_TUERMODUL_CTRL_next_state = 2; + break; + } + }/** switch OEFFNEN_FH_TUERMODUL_CTRL_next_state **/ + break; + } + case 2 : /** state SCHLIESSEN in chart FH_TUERMODUL_CTRL **/ + { + if ((FH_TUERMODUL__POSITION <= 0)) + { + stable = 0; + FH_TUERMODUL__MFHZ_copy = 0; + + INITIALISIERT_FH_TUERMODUL_CTRL_next_state = 3; + break; + } + switch (SCHLIESSEN_FH_TUERMODUL_CTRL_next_state) + { + case 1 : /** state TIPP_SCHLIESSEN in chart FH_TUERMODUL_CTRL **/ + { + if ((FH_TUERMODUL__SFHA && !(FH_TUERMODUL__SFHA_old)) || (FH_TUERMODUL__SFHZ && !(FH_TUERMODUL__SFHZ_old))) + { + stable = 0; + FH_TUERMODUL__MFHZ_copy = 0; + + INITIALISIERT_FH_TUERMODUL_CTRL_next_state = 3; + break; + } + switch (TIPP_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state) + { + case 1 : /** state REVERSIEREN2 in chart FH_TUERMODUL_CTRL **/ + { + SYS_bit_clr (Bitlist, FH_TUERMODUL_CTRL__END_REVERS_copy_IDX); + if (SYS_bit_get (Bitlist, FH_TUERMODUL_CTRL__END_REVERS_IDX)) + { + stable = 0; + FH_TUERMODUL__MFHZ_copy = 1; + FH_TUERMODUL_CTRL__INREVERS2_copy = 0; + + TIPP_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state = 2; + FH_TUERMODUL__MFHA_copy = 0; + + SYS_bit_set(Bitlist, active_EINKLEMMSCHUTZ_CTRL_copy_IDX); + break; + } + break; + } + case 2 : /** state TIPP_SCHLIESSEN1 in chart FH_TUERMODUL_CTRL **/ + { + if (SYS_bit_get (Bitlist, FH_TUERMODUL__EINKLEMMUNG_IDX)) + { + stable = 0; + FH_TUERMODUL_CTRL__INREVERS2_copy = 1; + + SYS_bit_set (Bitlist, FH_TUERMODUL_CTRL__END_REVERS_copy_IDX); + TIPP_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state = 1; + SYS_bit_clr(Bitlist, active_EINKLEMMSCHUTZ_CTRL_copy_IDX); + FH_TUERMODUL__MFHZ_copy = 0; + + sc_FH_TUERMODUL_CTRL_1781_10 = time; + FH_TUERMODUL__MFHA_copy = 1; + break; + } + break; + } + default: + { + stable = 0; + TIPP_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state = 2; + SYS_bit_set(Bitlist, active_EINKLEMMSCHUTZ_CTRL_copy_IDX); + break; + } + }/** switch TIPP_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state **/ + break; + } + case 2 : /** state MANUELL_SCHLIESSEN in chart FH_TUERMODUL_CTRL **/ + { + if ((!(FH_TUERMODUL__SFHZ) && FH_TUERMODUL__SFHZ_old)) + { + stable = 0; + FH_TUERMODUL__MFHZ_copy = 0; + + INITIALISIERT_FH_TUERMODUL_CTRL_next_state = 3; + break; + } + switch (MANUELL_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state) + { + case 1 : /** state REVERSIEREN1 in chart FH_TUERMODUL_CTRL **/ + { + SYS_bit_clr (Bitlist, FH_TUERMODUL_CTRL__END_REVERS_copy_IDX); + if (SYS_bit_get (Bitlist, FH_TUERMODUL_CTRL__END_REVERS_IDX)) + { + stable = 0; + FH_TUERMODUL_CTRL__INREVERS1_copy = 0; + + MANUELL_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state = 2; + FH_TUERMODUL__MFHA_copy = 0; + + SYS_bit_set(Bitlist, active_EINKLEMMSCHUTZ_CTRL_copy_IDX); + FH_TUERMODUL__MFHZ_copy = 1; + break; + } + break; + } + case 2 : /** state MAN_SCHLIESSEN in chart FH_TUERMODUL_CTRL **/ + { + if (SYS_bit_get (Bitlist, 
FH_TUERMODUL__EINKLEMMUNG_IDX)) + { + stable = 0; + FH_TUERMODUL__MFHZ_copy = 0; + FH_TUERMODUL_CTRL__INREVERS1_copy = 1; + + SYS_bit_set (Bitlist, FH_TUERMODUL_CTRL__END_REVERS_copy_IDX); + MANUELL_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state = 1; + SYS_bit_clr(Bitlist, active_EINKLEMMSCHUTZ_CTRL_copy_IDX); + + sc_FH_TUERMODUL_CTRL_1739_10 = time; + FH_TUERMODUL__MFHA_copy = 1; + break; + } + if ((FH_TUERMODUL__SFHA && !(FH_TUERMODUL__SFHA_old))) + { + stable = 0; + + SCHLIESSEN_FH_TUERMODUL_CTRL_next_state = 1; + MANUELL_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state = 0; + break; + } + break; + } + default: + { + stable = 0; + MANUELL_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state = 2; + SYS_bit_set(Bitlist, active_EINKLEMMSCHUTZ_CTRL_copy_IDX); + FH_TUERMODUL__MFHZ_copy = 1; + break; + } + }/** switch MANUELL_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state **/ + break; + } + default: + { + stable = 0; + SCHLIESSEN_FH_TUERMODUL_CTRL_next_state = 2; + MANUELL_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state = 2; + SYS_bit_set(Bitlist, active_EINKLEMMSCHUTZ_CTRL_copy_IDX); + FH_TUERMODUL__MFHZ_copy = 1; + break; + } + }/** switch SCHLIESSEN_FH_TUERMODUL_CTRL_next_state **/ + break; + } + case 3 : /** state BEREIT in chart FH_TUERMODUL_CTRL **/ + { + if (((FH_TUERMODUL__SFHZ && !(FH_TUERMODUL__SFHZ_old))) && ((FH_TUERMODUL__POSITION > 0))) + { + stable = 0; + + INITIALISIERT_FH_TUERMODUL_CTRL_next_state = 2; + SCHLIESSEN_FH_TUERMODUL_CTRL_next_state = 2; + MANUELL_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state = 2; + SYS_bit_set(Bitlist, active_EINKLEMMSCHUTZ_CTRL_copy_IDX); + FH_TUERMODUL__MFHZ_copy = 1; + break; + } + if (((FH_TUERMODUL__SFHA && !(FH_TUERMODUL__SFHA_old))) && ((FH_TUERMODUL__POSITION < 405))) + { + stable = 0; + FH_TUERMODUL__MFHA_copy = 1; + + INITIALISIERT_FH_TUERMODUL_CTRL_next_state = 1; + OEFFNEN_FH_TUERMODUL_CTRL_next_state = 2; + break; + } + break; + } + default: + { + stable = 0; + INITIALISIERT_FH_TUERMODUL_CTRL_next_state = 3; + break; + } + }/** switch INITIALISIERT_FH_TUERMODUL_CTRL_next_state **/ + break; + } + default: + { + stable = 0; + B_FH_TUERMODUL_CTRL_next_state = 2; + break; + } + }/** switch B_FH_TUERMODUL_CTRL_next_state **/ + switch (A_FH_TUERMODUL_CTRL_next_state) + { + case 1 : /** state WIEDERHOLSPERRE in chart FH_TUERMODUL_CTRL **/ + { + SYS_bit_clr (Bitlist, entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRL_copy_IDX); + if ((step == 1 && tm_entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRLexited_BEREIT_FH_TUERMODUL_CTRL != 0 && (time - tm_entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRLexited_BEREIT_FH_TUERMODUL_CTRL == 1)) && ((FH_TUERMODUL__MFHZ || FH_TUERMODUL__MFHA))) + { + stable = 0; + FH_TUERMODUL_CTRL__N = FH_TUERMODUL_CTRL__N + 1; + + A_FH_TUERMODUL_CTRL_next_state = 1; + SYS_bit_set (Bitlist, entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRL_copy_IDX); + WIEDERHOLSPERRE_FH_TUERMODUL_CTRL_next_state = 1; + break; + } + switch (WIEDERHOLSPERRE_FH_TUERMODUL_CTRL_next_state) + { + case 1 : /** state WDHSP in chart FH_TUERMODUL_CTRL **/ + { + if ((step == 1 && tm_entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRL != 0 && (time - tm_entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRL == 3)) && (((!(FH_TUERMODUL__MFHZ || FH_TUERMODUL__MFHA)) && FH_TUERMODUL_CTRL__N > 0))) + { + stable = 0; + FH_TUERMODUL_CTRL__N = FH_TUERMODUL_CTRL__N - 1; + + WIEDERHOLSPERRE_FH_TUERMODUL_CTRL_next_state = 1; + break; + } + break; + } + default: + { + stable = 0; + SYS_bit_set (Bitlist, entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRL_copy_IDX); + WIEDERHOLSPERRE_FH_TUERMODUL_CTRL_next_state = 1; + break; + } + }/** switch 
WIEDERHOLSPERRE_FH_TUERMODUL_CTRL_next_state **/ + break; + } + default: + { + stable = 0; + FH_TUERMODUL_CTRL__N = 0; + A_FH_TUERMODUL_CTRL_next_state = 1; + SYS_bit_set (Bitlist, entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRL_copy_IDX); + WIEDERHOLSPERRE_FH_TUERMODUL_CTRL_next_state = 1; + break; + } + }/** switch A_FH_TUERMODUL_CTRL_next_state **/ + SYS_bit_cpy(Bitlist, entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRL_copy_IDX, Bitlist, entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRL_IDX); + SYS_bit_cpy (Bitlist, exited_BEREIT_FH_TUERMODUL_CTRL_copy_IDX, Bitlist, exited_BEREIT_FH_TUERMODUL_CTRL_IDX); + } +} + +void generic_EINKLEMMSCHUTZ_CTRL(void) +{ + if (SYS_bit_get(Bitlist,active_EINKLEMMSCHUTZ_CTRL_IDX)) + { + switch (EINKLEMMSCHUTZ_CTRL_EINKLEMMSCHUTZ_CTRL_next_state) + { + case 1 : /** state NORMALBETRIEB in chart EINKLEMMSCHUTZ_CTRL **/ + { + if (((FH_TUERMODUL__EKS_LEISTE_AKTIV && !(FH_TUERMODUL__EKS_LEISTE_AKTIV_old))) && ((!(FH_TUERMODUL__SFHZ && FH_TUERMODUL__SFHA)))) + { + stable = 0; + + SYS_bit_set (Bitlist, FH_TUERMODUL__EINKLEMMUNG_IDX); + EINKLEMMSCHUTZ_CTRL_EINKLEMMSCHUTZ_CTRL_next_state = 2; + break; + } + break; + } + case 2 : /** state EINKLEMMUNG in chart EINKLEMMSCHUTZ_CTRL **/ + { + SYS_bit_clr (Bitlist, FH_TUERMODUL__EINKLEMMUNG_IDX); + if ((!(FH_TUERMODUL__EKS_LEISTE_AKTIV) && FH_TUERMODUL__EKS_LEISTE_AKTIV_old)) + { + stable = 0; + + EINKLEMMSCHUTZ_CTRL_EINKLEMMSCHUTZ_CTRL_next_state = 1; + break; + } + break; + } + default: + { + stable = 0; + EINKLEMMSCHUTZ_CTRL_EINKLEMMSCHUTZ_CTRL_next_state = 1; + break; + } + }/** switch EINKLEMMSCHUTZ_CTRL_EINKLEMMSCHUTZ_CTRL_next_state **/ + } +} + +void generic_BLOCK_ERKENNUNG_CTRL(void) +{ + if (!SYS_bit_get(Bitlist, active_BLOCK_ERKENNUNG_CTRL_IDX) && SYS_bit_get(Bitlist,active_BLOCK_ERKENNUNG_CTRL_old_IDX) && !SYS_bit_get(Bitlist,active_BLOCK_ERKENNUNG_CTRL_copy_IDX)) + { + SYS_bit_clr (Bitlist, entered_EINSCHALTSTROM_MESSEN_BLOCK_ERKENNUNG_CTRL_IDX); + } + if (SYS_bit_get(Bitlist,active_BLOCK_ERKENNUNG_CTRL_IDX)) + { + switch (BLOCK_ERKENNUNG_CTRL_BLOCK_ERKENNUNG_CTRL_next_state) + { + case 1 : /** state KEINE_BEWEGUNG in chart BLOCK_ERKENNUNG_CTRL **/ + { + if ((FH_TUERMODUL__I_EIN != FH_TUERMODUL__I_EIN_old) && ((FH_TUERMODUL__I_EIN > 0))) + { + stable = 0; + FH_TUERMODUL__BLOCK_copy = 0; + + BLOCK_ERKENNUNG_CTRL_BLOCK_ERKENNUNG_CTRL_next_state = 2; + BLOCK_ERKENNUNG_CTRL__N = 0; + BLOCK_ERKENNUNG_CTRL__I_EIN_MAX = 2; + BEWEGUNG_BLOCK_ERKENNUNG_CTRL_next_state = 3; + SYS_bit_set (Bitlist, entered_EINSCHALTSTROM_MESSEN_BLOCK_ERKENNUNG_CTRL_IDX); + break; + } + break; + } + case 2 : /** state BEWEGUNG in chart BLOCK_ERKENNUNG_CTRL **/ + { + if ((!(FH_TUERMODUL__MFHA) && FH_TUERMODUL__MFHA_old) || (!(FH_TUERMODUL__MFHZ) && FH_TUERMODUL__MFHZ_old)) + { + stable = 0; + + BLOCK_ERKENNUNG_CTRL_BLOCK_ERKENNUNG_CTRL_next_state = 1; + BEWEGUNG_BLOCK_ERKENNUNG_CTRL_next_state = 0; + break; + } + switch (BEWEGUNG_BLOCK_ERKENNUNG_CTRL_next_state) + { + case 1 : /** state FENSTER_BLOCKIERT in chart BLOCK_ERKENNUNG_CTRL **/ + { + break; + } + case 2 : /** state FENSTER_BEWEGT_SICH in chart BLOCK_ERKENNUNG_CTRL **/ + { + if ((FH_TUERMODUL__I_EIN > (BLOCK_ERKENNUNG_CTRL__I_EIN_MAX - 2))) + { + stable = 0; + FH_TUERMODUL__BLOCK_copy = 1; + + BEWEGUNG_BLOCK_ERKENNUNG_CTRL_next_state = 1; + break; + } + break; + } + case 3 : /** state EINSCHALTSTROM_MESSEN in chart BLOCK_ERKENNUNG_CTRL **/ + { + SYS_bit_clr (Bitlist, entered_EINSCHALTSTROM_MESSEN_BLOCK_ERKENNUNG_CTRL_IDX); + if ((BLOCK_ERKENNUNG_CTRL__N == 11 && !(BLOCK_ERKENNUNG_CTRL__N_old == 
11))) + { + stable = 0; + + BEWEGUNG_BLOCK_ERKENNUNG_CTRL_next_state = 2; + break; + } + /** static reactions: **/ + if (BEWEGUNG_BLOCK_ERKENNUNG_CTRL_next_state == 3) + { + if (step == 1 && tm_entered_EINSCHALTSTROM_MESSEN_BLOCK_ERKENNUNG_CTRLch_BLOCK_ERKENNUNG_CTRL__N_copy != 0 && (time - tm_entered_EINSCHALTSTROM_MESSEN_BLOCK_ERKENNUNG_CTRLch_BLOCK_ERKENNUNG_CTRL__N_copy == 0.002)) + { + BLOCK_ERKENNUNG_CTRL__N = BLOCK_ERKENNUNG_CTRL__N + 1; + if ((FH_TUERMODUL__I_EIN > BLOCK_ERKENNUNG_CTRL__I_EIN_MAX)) + { + BLOCK_ERKENNUNG_CTRL__I_EIN_MAX = FH_TUERMODUL__I_EIN; + + } + + } + } + /** end static reactions **/ + break; + } + default: + { + stable = 0; + BLOCK_ERKENNUNG_CTRL__N = 0; + BLOCK_ERKENNUNG_CTRL__I_EIN_MAX = 2; + BEWEGUNG_BLOCK_ERKENNUNG_CTRL_next_state = 3; + SYS_bit_set (Bitlist, entered_EINSCHALTSTROM_MESSEN_BLOCK_ERKENNUNG_CTRL_IDX); + break; + } + }/** switch BEWEGUNG_BLOCK_ERKENNUNG_CTRL_next_state **/ + break; + } + default: + { + stable = 0; + BLOCK_ERKENNUNG_CTRL_BLOCK_ERKENNUNG_CTRL_next_state = 1; + break; + } + }/** switch BLOCK_ERKENNUNG_CTRL_BLOCK_ERKENNUNG_CTRL_next_state **/ + } +} + + + +void FH_DU(void) +{ + time = 1; /**SYS_get_clock()**/ + stable = 0; + step = 0; + while (!stable) + { + stable = 1; + step++; + { + switch (FH_STEUERUNG_DUMMY_FH_STEUERUNG_DUMMY_next_state) + { + case 1 : /** state SCHLIESSEN in chart FH_STEUERUNG_DUMMY **/ + { + if ((!(FH_DU__MFHZ) && FH_DU__MFHZ_old)) + { + stable = 0; + FH_DU__MFH = 0; + + FH_STEUERUNG_DUMMY_FH_STEUERUNG_DUMMY_next_state = 2; + break; + } + break; + } + case 2 : /** state BEREIT in chart FH_STEUERUNG_DUMMY **/ + { + if ((FH_DU__MFHZ && !(FH_DU__MFHZ_old))) + { + stable = 0; + FH_DU__MFH = -100; + + FH_STEUERUNG_DUMMY_FH_STEUERUNG_DUMMY_next_state = 1; + break; + } + if ((FH_DU__MFHA && !(FH_DU__MFHA_old))) + { + stable = 0; + FH_DU__MFH = 100; + + FH_STEUERUNG_DUMMY_FH_STEUERUNG_DUMMY_next_state = 3; + break; + } + break; + } + case 3 : /** state OEFFNEN in chart FH_STEUERUNG_DUMMY **/ + { + if ((!(FH_DU__MFHA) && FH_DU__MFHA_old)) + { + stable = 0; + FH_DU__MFH = 0; + + FH_STEUERUNG_DUMMY_FH_STEUERUNG_DUMMY_next_state = 2; + break; + } + break; + } + default: + { + stable = 0; + FH_DU__MFH = 0; + FH_STEUERUNG_DUMMY_FH_STEUERUNG_DUMMY_next_state = 2; + break; + } + }/** switch FH_STEUERUNG_DUMMY_FH_STEUERUNG_DUMMY_next_state **/ + } + { + { + if (!SYS_bit_get(Bitlist, active_KINDERSICHERUNG_CTRL_IDX)) + { + KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state = 3; + } + SYS_bit_clr(Bitlist, active_KINDERSICHERUNG_CTRL_copy_IDX); + if (!SYS_bit_get(Bitlist, active_EINKLEMMSCHUTZ_CTRL_IDX)) + { + EINKLEMMSCHUTZ_CTRL_EINKLEMMSCHUTZ_CTRL_next_state = 1; + } + SYS_bit_clr(Bitlist, active_EINKLEMMSCHUTZ_CTRL_copy_IDX); + if (!SYS_bit_get(Bitlist, active_BLOCK_ERKENNUNG_CTRL_IDX)) + { + SYS_bit_clr (Bitlist, entered_EINSCHALTSTROM_MESSEN_BLOCK_ERKENNUNG_CTRL_IDX); + BLOCK_ERKENNUNG_CTRL_BLOCK_ERKENNUNG_CTRL_next_state = 1; + } + SYS_bit_clr(Bitlist, active_BLOCK_ERKENNUNG_CTRL_copy_IDX); + if (!SYS_bit_get(Bitlist, active_FH_TUERMODUL_CTRL_IDX)) + { + SYS_bit_clr (Bitlist, entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRL_IDX); + SYS_bit_clr (Bitlist, exited_BEREIT_FH_TUERMODUL_CTRL_IDX); + B_FH_TUERMODUL_CTRL_next_state = 2; + FH_TUERMODUL_CTRL__N = 0; + A_FH_TUERMODUL_CTRL_next_state = 1; + SYS_bit_set (Bitlist, entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRL_copy_IDX); + WIEDERHOLSPERRE_FH_TUERMODUL_CTRL_next_state = 1; + } + SYS_bit_clr(Bitlist, active_FH_TUERMODUL_CTRL_copy_IDX); + SYS_bit_set(Bitlist, 
active_KINDERSICHERUNG_CTRL_copy_IDX); + SYS_bit_set(Bitlist, active_EINKLEMMSCHUTZ_CTRL_copy_IDX); + SYS_bit_set(Bitlist, active_BLOCK_ERKENNUNG_CTRL_copy_IDX); + SYS_bit_set(Bitlist, active_FH_TUERMODUL_CTRL_copy_IDX); + /** static reactions: **/ + if (FH_DU__S_FH_TMBFZUCAN != FH_DU__S_FH_TMBFZUCAN_old) + { + if ((!FH_DU__DOOR_ID)) + { + FH_DU__S_FH_FTZU = FH_DU__S_FH_TMBFZUCAN; + + } + + } + if (FH_DU__S_FH_TMBFZUDISC != FH_DU__S_FH_TMBFZUDISC_old) + { + if (FH_DU__DOOR_ID) + { + FH_DU__S_FH_TMBFZUCAN = FH_DU__S_FH_TMBFZUDISC; + + } + + } + if (FH_DU__S_FH_TMBFAUFCAN != FH_DU__S_FH_TMBFAUFCAN_old) + { + if ((!FH_DU__DOOR_ID)) + { + FH_DU__S_FH_FTAUF = FH_DU__S_FH_TMBFAUFCAN; + + } + + } + if (FH_DU__S_FH_TMBFAUFDISC != FH_DU__S_FH_TMBFAUFDISC_old) + { + if (FH_DU__DOOR_ID) + { + FH_DU__S_FH_TMBFAUFCAN = FH_DU__S_FH_TMBFAUFDISC; + + } + + } + /** end static reactions **/ + } + } + SYS_bit_cpy(Bitlist, active_KINDERSICHERUNG_CTRL_IDX, Bitlist, active_KINDERSICHERUNG_CTRL_old_IDX); + SYS_bit_cpy(Bitlist, active_FH_TUERMODUL_CTRL_IDX, Bitlist, active_FH_TUERMODUL_CTRL_old_IDX); + SYS_bit_cpy(Bitlist, active_EINKLEMMSCHUTZ_CTRL_IDX, Bitlist, active_EINKLEMMSCHUTZ_CTRL_old_IDX); + SYS_bit_cpy(Bitlist, active_BLOCK_ERKENNUNG_CTRL_IDX, Bitlist, active_BLOCK_ERKENNUNG_CTRL_old_IDX); + FH_TUERMODUL__SFHA_MEC = FH_DU__S_FH_AUFDISC; + FH_TUERMODUL__SFHA_ZENTRAL = FH_DU__S_FH_FTAUF; + FH_TUERMODUL__SFHZ_MEC = FH_DU__S_FH_ZUDISC; + FH_TUERMODUL__SFHZ_ZENTRAL = FH_DU__S_FH_FTZU; + + generic_KINDERSICHERUNG_CTRL(); + + FH_DU__MFHA = FH_TUERMODUL__MFHA; + FH_DU__MFHZ = FH_TUERMODUL__MFHZ; + FH_DU__I_EIN = FH_TUERMODUL__I_EIN; + FH_DU__EKS_LEISTE_AKTIV = FH_TUERMODUL__EKS_LEISTE_AKTIV; + FH_DU__POSITION = FH_TUERMODUL__POSITION; + FH_DU__FT = FH_TUERMODUL__FT; + FH_DU__S_FH_AUFDISC = FH_TUERMODUL__SFHA_MEC; + FH_DU__S_FH_FTAUF = FH_TUERMODUL__SFHA_ZENTRAL; + FH_DU__S_FH_ZUDISC = FH_TUERMODUL__SFHZ_MEC; + FH_DU__S_FH_FTZU = FH_TUERMODUL__SFHZ_ZENTRAL; + FH_DU__KL_50 = FH_TUERMODUL__KL_50; + FH_DU__BLOCK = FH_TUERMODUL__BLOCK; + + FH_TUERMODUL__SFHA_MEC = FH_DU__S_FH_AUFDISC; + FH_TUERMODUL__SFHA_ZENTRAL = FH_DU__S_FH_FTAUF; + FH_TUERMODUL__SFHZ_MEC = FH_DU__S_FH_ZUDISC; + FH_TUERMODUL__SFHZ_ZENTRAL = FH_DU__S_FH_FTZU; + + generic_FH_TUERMODUL_CTRL(); + + FH_DU__MFHA = FH_TUERMODUL__MFHA; + FH_DU__MFHZ = FH_TUERMODUL__MFHZ; + FH_DU__I_EIN = FH_TUERMODUL__I_EIN; + FH_DU__EKS_LEISTE_AKTIV = FH_TUERMODUL__EKS_LEISTE_AKTIV; + FH_DU__POSITION = FH_TUERMODUL__POSITION; + FH_DU__FT = FH_TUERMODUL__FT; + FH_DU__S_FH_AUFDISC = FH_TUERMODUL__SFHA_MEC; + FH_DU__S_FH_FTAUF = FH_TUERMODUL__SFHA_ZENTRAL; + FH_DU__S_FH_ZUDISC = FH_TUERMODUL__SFHZ_MEC; + FH_DU__S_FH_FTZU = FH_TUERMODUL__SFHZ_ZENTRAL; + FH_DU__KL_50 = FH_TUERMODUL__KL_50; + FH_DU__BLOCK = FH_TUERMODUL__BLOCK; + + FH_TUERMODUL__SFHA_MEC = FH_DU__S_FH_AUFDISC; + FH_TUERMODUL__SFHA_ZENTRAL = FH_DU__S_FH_FTAUF; + FH_TUERMODUL__SFHZ_MEC = FH_DU__S_FH_ZUDISC; + FH_TUERMODUL__SFHZ_ZENTRAL = FH_DU__S_FH_FTZU; + + generic_EINKLEMMSCHUTZ_CTRL(); + + FH_DU__MFHA = FH_TUERMODUL__MFHA; + FH_DU__MFHZ = FH_TUERMODUL__MFHZ; + FH_DU__I_EIN = FH_TUERMODUL__I_EIN; + FH_DU__EKS_LEISTE_AKTIV = FH_TUERMODUL__EKS_LEISTE_AKTIV; + FH_DU__POSITION = FH_TUERMODUL__POSITION; + FH_DU__FT = FH_TUERMODUL__FT; + FH_DU__S_FH_AUFDISC = FH_TUERMODUL__SFHA_MEC; + FH_DU__S_FH_FTAUF = FH_TUERMODUL__SFHA_ZENTRAL; + FH_DU__S_FH_ZUDISC = FH_TUERMODUL__SFHZ_MEC; + FH_DU__S_FH_FTZU = FH_TUERMODUL__SFHZ_ZENTRAL; + FH_DU__KL_50 = FH_TUERMODUL__KL_50; + FH_DU__BLOCK = FH_TUERMODUL__BLOCK; + + 
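+    /* Same hand-off as before the previous chart calls: refresh the
+       FH_TUERMODUL__* inputs from the FH_DU__* signals, run the next
+       chart (generic_BLOCK_ERKENNUNG_CTRL below), then copy its outputs
+       back, so every chart evaluated in this step sees consistent values. */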
FH_TUERMODUL__SFHA_MEC = FH_DU__S_FH_AUFDISC; + FH_TUERMODUL__SFHA_ZENTRAL = FH_DU__S_FH_FTAUF; + FH_TUERMODUL__SFHZ_MEC = FH_DU__S_FH_ZUDISC; + FH_TUERMODUL__SFHZ_ZENTRAL = FH_DU__S_FH_FTZU; + + generic_BLOCK_ERKENNUNG_CTRL(); + + FH_DU__MFHA = FH_TUERMODUL__MFHA; + FH_DU__MFHZ = FH_TUERMODUL__MFHZ; + FH_DU__I_EIN = FH_TUERMODUL__I_EIN; + FH_DU__EKS_LEISTE_AKTIV = FH_TUERMODUL__EKS_LEISTE_AKTIV; + FH_DU__POSITION = FH_TUERMODUL__POSITION; + FH_DU__FT = FH_TUERMODUL__FT; + FH_DU__S_FH_AUFDISC = FH_TUERMODUL__SFHA_MEC; + FH_DU__S_FH_FTAUF = FH_TUERMODUL__SFHA_ZENTRAL; + FH_DU__S_FH_ZUDISC = FH_TUERMODUL__SFHZ_MEC; + FH_DU__S_FH_FTZU = FH_TUERMODUL__SFHZ_ZENTRAL; + FH_DU__KL_50 = FH_TUERMODUL__KL_50; + FH_DU__BLOCK = FH_TUERMODUL__BLOCK; + + SYS_bit_cpy(Bitlist, active_KINDERSICHERUNG_CTRL_copy_IDX, Bitlist, active_KINDERSICHERUNG_CTRL_IDX); + SYS_bit_cpy(Bitlist, active_FH_TUERMODUL_CTRL_copy_IDX, Bitlist, active_FH_TUERMODUL_CTRL_IDX); + SYS_bit_cpy(Bitlist, active_EINKLEMMSCHUTZ_CTRL_copy_IDX, Bitlist, active_EINKLEMMSCHUTZ_CTRL_IDX); + SYS_bit_cpy(Bitlist, active_BLOCK_ERKENNUNG_CTRL_copy_IDX, Bitlist, active_BLOCK_ERKENNUNG_CTRL_IDX); + FH_TUERMODUL_CTRL__N_old = FH_TUERMODUL_CTRL__N; + FH_TUERMODUL__I_EIN_old = FH_TUERMODUL__I_EIN; + FH_DU__MFH = FH_DU__MFH_copy; + FH_DU__I_EIN_old = FH_DU__I_EIN; + BLOCK_ERKENNUNG_CTRL__N_old = BLOCK_ERKENNUNG_CTRL__N; + FH_TUERMODUL__SFHZ_ZENTRAL_old = FH_TUERMODUL__SFHZ_ZENTRAL; + FH_TUERMODUL__SFHZ_MEC_old = FH_TUERMODUL__SFHZ_MEC; + FH_TUERMODUL__SFHA_ZENTRAL_old = FH_TUERMODUL__SFHA_ZENTRAL; + FH_TUERMODUL__SFHA_MEC_old = FH_TUERMODUL__SFHA_MEC; + FH_TUERMODUL__BLOCK = FH_TUERMODUL__BLOCK_copy; + FH_TUERMODUL__BLOCK_old = FH_TUERMODUL__BLOCK; + FH_TUERMODUL__SFHZ = FH_TUERMODUL__SFHZ_copy; + FH_TUERMODUL__SFHZ_old = FH_TUERMODUL__SFHZ; + FH_TUERMODUL__SFHA = FH_TUERMODUL__SFHA_copy; + FH_TUERMODUL__SFHA_old = FH_TUERMODUL__SFHA; + FH_TUERMODUL__MFHZ = FH_TUERMODUL__MFHZ_copy; + FH_TUERMODUL__MFHZ_old = FH_TUERMODUL__MFHZ; + FH_TUERMODUL__MFHA = FH_TUERMODUL__MFHA_copy; + FH_TUERMODUL__MFHA_old = FH_TUERMODUL__MFHA; + FH_TUERMODUL__EKS_LEISTE_AKTIV_old = FH_TUERMODUL__EKS_LEISTE_AKTIV; + FH_DU__EKS_LEISTE_AKTIV_old = FH_DU__EKS_LEISTE_AKTIV; + FH_DU__S_FH_TMBFAUFCAN_old = FH_DU__S_FH_TMBFAUFCAN; + FH_DU__S_FH_TMBFZUCAN_old = FH_DU__S_FH_TMBFZUCAN; + FH_DU__S_FH_TMBFZUDISC_old = FH_DU__S_FH_TMBFZUDISC; + FH_DU__S_FH_TMBFAUFDISC_old = FH_DU__S_FH_TMBFAUFDISC; + FH_DU__BLOCK = FH_DU__BLOCK_copy; + FH_DU__BLOCK_old = FH_DU__BLOCK; + FH_DU__MFHZ = FH_DU__MFHZ_copy; + FH_DU__MFHZ_old = FH_DU__MFHZ; + FH_DU__MFHA = FH_DU__MFHA_copy; + FH_DU__MFHA_old = FH_DU__MFHA; + + }/** while(!stable) **/ + +}/** FH_DU **/ + + + +int main(void) +{ + init(); + interface(); + FH_DU(); + + return 0; +} diff --git a/test/src/ud.c b/test/src/ud.c new file mode 100755 index 0000000..8da5a52 --- /dev/null +++ b/test/src/ud.c @@ -0,0 +1,161 @@ +/* MDH WCET BENCHMARK SUITE. File version $Id: ud.c,v 1.4 2005/11/11 10:32:53 ael01 Exp $ */ + + +/*************************************************************************/ +/* */ +/* SNU-RT Benchmark Suite for Worst Case Timing Analysis */ +/* ===================================================== */ +/* Collected and Modified by S.-S. Lim */ +/* sslim@archi.snu.ac.kr */ +/* Real-Time Research Group */ +/* Seoul National University */ +/* */ +/* */ +/* < Features > - restrictions for our experimental environment */ +/* */ +/* 1. Completely structured. */ +/* - There are no unconditional jumps. 
*/ +/* - There are no exit from loop bodies. */ +/* (There are no 'break' or 'return' in loop bodies) */ +/* 2. No 'switch' statements. */ +/* 3. No 'do..while' statements. */ +/* 4. Expressions are restricted. */ +/* - There are no multiple expressions joined by 'or', */ +/* 'and' operations. */ +/* 5. No library calls. */ +/* - All the functions needed are implemented in the */ +/* source file. */ +/* */ +/* */ +/*************************************************************************/ +/* */ +/* FILE: ludcmp.c */ +/* SOURCE : Turbo C Programming for Engineering */ +/* */ +/* DESCRIPTION : */ +/* */ +/* Simultaneous linear equations by LU decomposition. */ +/* The arrays a[][] and b[] are input and the array x[] is output */ +/* row vector. */ +/* The variable n is the number of equations. */ +/* The input arrays are initialized in function main. */ +/* */ +/* */ +/* REMARK : */ +/* */ +/* EXECUTION TIME : */ +/* */ +/* */ +/*************************************************************************/ + +/************************************************************************* + * This file: + * + * - Name changed to "ud.c" + * - Modified for use with Uppsala/Paderborn tool + * : doubles changed to int + * : some tests removed + * - Program is much more linear, all loops will run to end + * - Purpose: test the effect of conditional flows + * + *************************************************************************/ + + + + + + +/* +** Benchmark Suite for Real-Time Applications, by Sung-Soo Lim +** +** III-4. ludcmp.c : Simultaneous Linear Equations by LU Decomposition +** (from the book C Programming for EEs by Hyun Soon Ahn) +*/ + + + +long int a[50][50], b[50], x[50]; + +int ludcmp(int nmax, int n); + + +/* static double fabs(double n) */ +/* { */ +/* double f; */ + +/* if (n >= 0) f = n; */ +/* else f = -n; */ +/* return f; */ +/* } */ + +void main() +{ + int i, j, nmax = 50, n = 5, chkerr; + long int /* eps, */ w; + + /* eps = 1.0e-6; */ + + /* Init loop */ + for(i = 0; i <= n; i++) + { + w = 0.0; /* data to fill in cells */ + for(j = 0; j <= n; j++) + { + a[i][j] = (i + 1) + (j + 1); + if(i == j) /* only once per loop pass */ + a[i][j] *= 2.0; + w += a[i][j]; + } + b[i] = w; + } + + /* chkerr = ludcmp(nmax, n, eps); */ + chkerr = ludcmp(nmax,n); +} + +int ludcmp(int nmax, int n) +{ + int i, j, k; + long w, y[100]; + + /* if(n > 99 || eps <= 0.0) return(999); */ + for(i = 0; i < n; i++) + { + /* if(fabs(a[i][i]) <= eps) return(1); */ + for(j = i+1; j <= n; j++) /* triangular loop vs. i */ + { + w = a[j][i]; + if(i != 0) /* sub-loop is conditional, done + all iterations except first of the + OUTER loop */ + for(k = 0; k < i; k++) + w -= a[j][k] * a[k][i]; + a[j][i] = w / a[i][i]; + } + for(j = i+1; j <= n; j++) /* triangular loop vs. i */ + { + w = a[i+1][j]; + for(k = 0; k <= i; k++) /* triangular loop vs. i */ + w -= a[i+1][k] * a[k][j]; + a[i+1][j] = w; + } + } + y[0] = b[0]; + for(i = 1; i <= n; i++) /* iterates n times */ + { + w = b[i]; + for(j = 0; j < i; j++) /* triangular sub loop */ + w -= a[i][j] * y[j]; + y[i] = w; + } + x[n] = y[n] / a[n][n]; + for(i = n-1; i >= 0; i--) /* iterates n times */ + { + w = y[i]; + for(j = i+1; j <= n; j++) /* triangular sub loop */ + w -= a[i][j] * x[j]; + x[i] = w / a[i][i] ; + } + return(0); +} + diff --git a/test/src/whet.c b/test/src/whet.c new file mode 100755 index 0000000..95eefee --- /dev/null +++ b/test/src/whet.c @@ -0,0 +1,230 @@ +/* + * Whetstone benchmark in C. 
This program is a translation of the + * original Algol version in "A Synthetic Benchmark" by H.J. Curnow + * and B.A. Wichman in Computer Journal, Vol 19 #1, February 1976. + * + * Used to test compiler optimization and floating point performance. + * + * Compile by: cc -O -s -o whet whet.c + * or: cc -O -DPOUT -s -o whet whet.c + * if output is desired. + */ + +#define ITERATIONS 10 /* 1 Million Whetstone instructions */ + +#include "math.h" + +double x1, x2, x3, x4, x, y, z, t, t1, t2; +double e1[4]; +int i, j, k, l, n1, n2, n3, n4, n6, n7, n8, n9, n10, n11; + +main() +{ + + /* initialize constants */ + + t = 0.499975; + t1 = 0.50025; + t2 = 2.0; + + /* set values of module weights */ + + n1 = 0 * ITERATIONS; + n2 = 12 * ITERATIONS; + n3 = 14 * ITERATIONS; + n4 = 345 * ITERATIONS; + n6 = 210 * ITERATIONS; + n7 = 32 * ITERATIONS; + n8 = 899 * ITERATIONS; + n9 = 616 * ITERATIONS; + n10 = 0 * ITERATIONS; + n11 = 93 * ITERATIONS; + +/* MODULE 1: simple identifiers */ + + x1 = 1.0; + x2 = x3 = x4 = -1.0; + + for(i = 1; i <= n1; i += 1) { + x1 = ( x1 + x2 + x3 - x4 ) * t; + x2 = ( x1 + x2 - x3 - x4 ) * t; + x3 = ( x1 - x2 + x3 + x4 ) * t; + x4 = (-x1 + x2 + x3 + x4 ) * t; + } +#ifdef POUT + pout(n1, n1, n1, x1, x2, x3, x4); +#endif + + +/* MODULE 2: array elements */ + + e1[0] = 1.0; + e1[1] = e1[2] = e1[3] = -1.0; + + for (i = 1; i <= n2; i +=1) { + e1[0] = ( e1[0] + e1[1] + e1[2] - e1[3] ) * t; + e1[1] = ( e1[0] + e1[1] - e1[2] + e1[3] ) * t; + e1[2] = ( e1[0] - e1[1] + e1[2] + e1[3] ) * t; + e1[3] = (-e1[0] + e1[1] + e1[2] + e1[3] ) * t; + } +#ifdef POUT + pout(n2, n3, n2, e1[0], e1[1], e1[2], e1[3]); +#endif + +/* MODULE 3: array as parameter */ + + for (i = 1; i <= n3; i += 1) + pa(e1); +#ifdef POUT + pout(n3, n2, n2, e1[0], e1[1], e1[2], e1[3]); +#endif + +/* MODULE 4: conditional jumps */ + + j = 1; + for (i = 1; i <= n4; i += 1) { + if (j == 1) + j = 2; + else + j = 3; + + if (j > 2) + j = 0; + else + j = 1; + + if (j < 1 ) + j = 1; + else + j = 0; + } +#ifdef POUT + pout(n4, j, j, x1, x2, x3, x4); +#endif + +/* MODULE 5: omitted */ + +/* MODULE 6: integer arithmetic */ + + j = 1; + k = 2; + l = 3; + + for (i = 1; i <= n6; i += 1) { + j = j * (k - j) * (l -k); + k = l * k - (l - j) * k; + l = (l - k) * (k + j); + + e1[l - 2] = j + k + l; /* C arrays are zero based */ + e1[k - 2] = j * k * l; + } +#ifdef POUT + pout(n6, j, k, e1[0], e1[1], e1[2], e1[3]); +#endif + +/* MODULE 7: trig. 
functions */ + + x = y = 0.5; + + for(i = 1; i <= n7; i +=1) { + x = t * atan(t2*sin(x)*cos(x)/(cos(x+y)+cos(x-y)-1.0)); + y = t * atan(t2*sin(y)*cos(y)/(cos(x+y)+cos(x-y)-1.0)); + } +#ifdef POUT + pout(n7, j, k, x, x, y, y); +#endif + +/* MODULE 8: procedure calls */ + + x = y = z = 1.0; + + for (i = 1; i <= n8; i +=1) + p3(x, y, &z); +#ifdef POUT + pout(n8, j, k, x, y, z, z); +#endif + +/* MODULE9: array references */ + + j = 1; + k = 2; + l = 3; + + e1[0] = 1.0; + e1[1] = 2.0; + e1[2] = 3.0; + + for(i = 1; i <= n9; i += 1) + p0(); +#ifdef POUT + pout(n9, j, k, e1[0], e1[1], e1[2], e1[3]); +#endif + +/* MODULE10: integer arithmetic */ + + j = 2; + k = 3; + + for(i = 1; i <= n10; i +=1) { + j = j + k; + k = j + k; + j = k - j; + k = k - j - j; + } +#ifdef POUT + pout(n10, j, k, x1, x2, x3, x4); +#endif + +/* MODULE11: standard functions */ + + x = 0.75; + for(i = 1; i <= n11; i +=1) + x = sqrt( exp( log(x) / t1)); + +#ifdef POUT + pout(n11, j, k, x, x, x, x); +#endif +} + +pa(e) +double e[4]; +{ + register int j; + + j = 0; + lab: + e[0] = ( e[0] + e[1] + e[2] - e[3] ) * t; + e[1] = ( e[0] + e[1] - e[2] + e[3] ) * t; + e[2] = ( e[0] - e[1] + e[2] + e[3] ) * t; + e[3] = ( -e[0] + e[1] + e[2] + e[3] ) / t2; + j += 1; + if (j < 6) + goto lab; +} + + +p3(x, y, z) +double x, y, *z; +{ + x = t * (x + y); + y = t * (x + y); + *z = (x + y) /t2; +} + + +p0() +{ + e1[j] = e1[k]; + e1[k] = e1[l]; + e1[l] = e1[j]; +} + +#ifdef POUT +pout(n, j, k, x1, x2, x3, x4) +int n, j, k; +double x1, x2, x3, x4; +{ + printf("%6d%6d%6d %5e %5e %5e %5e\n", + n, j, k, x1, x2, x3, x4); +} +#endif diff --git a/test/statemate.ll b/test/statemate.ll new file mode 100644 index 0000000..435303a --- /dev/null +++ b/test/statemate.ll @@ -0,0 +1,1702 @@ +; ModuleID = 'statemate.c' +source_filename = "statemate.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +@Bitlist = internal unnamed_addr global [64 x i8] zeroinitializer, align 16 +@time = dso_local local_unnamed_addr global i64 0, align 8 +@tm_entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRL = dso_local local_unnamed_addr global i64 0, align 8 +@tm_entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRLexited_BEREIT_FH_TUERMODUL_CTRL = dso_local local_unnamed_addr global i64 0, align 8 +@sc_FH_TUERMODUL_CTRL_2375_2 = dso_local local_unnamed_addr global i64 0, align 8 +@FH_TUERMODUL__MFHA_copy = dso_local local_unnamed_addr global i8 0, align 1 +@sc_FH_TUERMODUL_CTRL_2352_1 = dso_local local_unnamed_addr global i64 0, align 8 +@FH_TUERMODUL__MFHZ_copy = dso_local local_unnamed_addr global i8 0, align 1 +@sc_FH_TUERMODUL_CTRL_2329_1 = dso_local local_unnamed_addr global i64 0, align 8 +@sc_FH_TUERMODUL_CTRL_1781_10 = dso_local local_unnamed_addr global i64 0, align 8 +@sc_FH_TUERMODUL_CTRL_1739_10 = dso_local local_unnamed_addr global i64 0, align 8 +@BLOCK_ERKENNUNG_CTRL__N = dso_local local_unnamed_addr global i32 0, align 4 +@BLOCK_ERKENNUNG_CTRL__N_old = dso_local local_unnamed_addr global i32 0, align 4 +@tm_entered_EINSCHALTSTROM_MESSEN_BLOCK_ERKENNUNG_CTRLch_BLOCK_ERKENNUNG_CTRL__N_copy = dso_local local_unnamed_addr global i64 0, align 8 +@NICHT_INITIALISIERT_NICHT_INITIALISIERT_next_state = dso_local local_unnamed_addr global i8 0, align 1 +@ZENTRAL_KINDERSICHERUNG_CTRL_next_state = dso_local local_unnamed_addr global i8 0, align 1 +@MEC_KINDERSICHERUNG_CTRL_next_state = dso_local local_unnamed_addr global i8 0, align 1 +@KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state = dso_local 
local_unnamed_addr global i8 0, align 1 +@B_FH_TUERMODUL_CTRL_next_state = dso_local local_unnamed_addr global i8 0, align 1 +@A_FH_TUERMODUL_CTRL_next_state = dso_local local_unnamed_addr global i8 0, align 1 +@WIEDERHOLSPERRE_FH_TUERMODUL_CTRL_next_state = dso_local local_unnamed_addr global i8 0, align 1 +@INITIALISIERT_FH_TUERMODUL_CTRL_next_state = dso_local local_unnamed_addr global i8 0, align 1 +@TIPP_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state = dso_local local_unnamed_addr global i8 0, align 1 +@MANUELL_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state = dso_local local_unnamed_addr global i8 0, align 1 +@OEFFNEN_FH_TUERMODUL_CTRL_next_state = dso_local local_unnamed_addr global i8 0, align 1 +@SCHLIESSEN_FH_TUERMODUL_CTRL_next_state = dso_local local_unnamed_addr global i8 0, align 1 +@FH_STEUERUNG_DUMMY_FH_STEUERUNG_DUMMY_next_state = dso_local local_unnamed_addr global i8 0, align 1 +@EINKLEMMSCHUTZ_CTRL_EINKLEMMSCHUTZ_CTRL_next_state = dso_local local_unnamed_addr global i8 0, align 1 +@BEWEGUNG_BLOCK_ERKENNUNG_CTRL_next_state = dso_local local_unnamed_addr global i8 0, align 1 +@BLOCK_ERKENNUNG_CTRL_BLOCK_ERKENNUNG_CTRL_next_state = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL__SFHA_ZENTRAL = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL__SFHZ_ZENTRAL = dso_local local_unnamed_addr global i8 0, align 1 +@stable = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL__SFHZ_copy = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL__SFHA_copy = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL__SFHA_ZENTRAL_old = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL__SFHZ_ZENTRAL_old = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL__SFHA_MEC = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL__SFHZ_MEC = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL__SFHA_MEC_old = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL__SFHZ_MEC_old = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL__KL_50 = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL_CTRL__N = dso_local local_unnamed_addr global i32 0, align 4 +@FH_TUERMODUL_CTRL__N_old = dso_local local_unnamed_addr global i32 0, align 4 +@FH_TUERMODUL__BLOCK = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL__BLOCK_old = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL__MFHZ = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL__SFHZ = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL__SFHA = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL_CTRL__INREVERS1 = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL_CTRL__INREVERS2 = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL__MFHA = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL__POSITION = dso_local local_unnamed_addr global i32 0, align 4 +@FH_TUERMODUL__SFHZ_old = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL__SFHA_old = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL_CTRL__INREVERS2_copy = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL_CTRL__INREVERS1_copy = dso_local local_unnamed_addr global i8 0, align 1 +@step = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL__EKS_LEISTE_AKTIV = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL__EKS_LEISTE_AKTIV_old = dso_local local_unnamed_addr global i8 0, align 1 
+@FH_TUERMODUL__I_EIN = dso_local local_unnamed_addr global i32 0, align 4 +@FH_TUERMODUL__I_EIN_old = dso_local local_unnamed_addr global i32 0, align 4 +@FH_TUERMODUL__BLOCK_copy = dso_local local_unnamed_addr global i8 0, align 1 +@BLOCK_ERKENNUNG_CTRL__I_EIN_MAX = dso_local local_unnamed_addr global i32 0, align 4 +@FH_TUERMODUL__MFHA_old = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL__MFHZ_old = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__MFHZ = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__MFHZ_old = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__MFH = dso_local local_unnamed_addr global i32 0, align 4 +@FH_DU__MFHA = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__MFHA_old = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__S_FH_TMBFZUCAN = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__S_FH_TMBFZUCAN_old = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__DOOR_ID = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__S_FH_FTZU = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__S_FH_TMBFZUDISC = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__S_FH_TMBFZUDISC_old = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__S_FH_TMBFAUFCAN = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__S_FH_TMBFAUFCAN_old = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__S_FH_FTAUF = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__S_FH_TMBFAUFDISC = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__S_FH_TMBFAUFDISC_old = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__S_FH_AUFDISC = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__S_FH_ZUDISC = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__I_EIN = dso_local local_unnamed_addr global i32 0, align 4 +@FH_DU__EKS_LEISTE_AKTIV = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__POSITION = dso_local local_unnamed_addr global i32 0, align 4 +@FH_TUERMODUL__FT = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__FT = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__KL_50 = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__BLOCK = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__MFH_copy = dso_local local_unnamed_addr global i32 0, align 4 +@FH_DU__I_EIN_old = dso_local local_unnamed_addr global i32 0, align 4 +@FH_DU__EKS_LEISTE_AKTIV_old = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__BLOCK_copy = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__BLOCK_old = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__MFHZ_copy = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__MFHA_copy = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL_CTRL__N_copy = dso_local local_unnamed_addr global i32 0, align 4 +@BLOCK_ERKENNUNG_CTRL__I_EIN_MAX_copy = dso_local local_unnamed_addr global i32 0, align 4 +@BLOCK_ERKENNUNG_CTRL__N_copy = dso_local local_unnamed_addr global i32 0, align 4 +@FH_TUERMODUL_CTRL__FT = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL__COM_OPEN = dso_local local_unnamed_addr global i8 0, align 1 +@FH_TUERMODUL__COM_CLOSE = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__S_FH_TMBFAUFCAN_copy = dso_local local_unnamed_addr global i8 0, align 1 +@FH_DU__S_FH_TMBFZUCAN_copy = dso_local local_unnamed_addr global i8 0, align 1 + +; Function Attrs: mustprogress nofree norecurse nosync nounwind sspstrong uwtable willreturn +define 
dso_local void @interface() local_unnamed_addr #0 { + %1 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 4), align 4, !tbaa !5 + %2 = icmp eq i8 %1, 0 + br i1 %2, label %5, label %3 + +3: ; preds = %0 + %4 = load i64, i64* @time, align 8, !tbaa !8 + store i64 %4, i64* @tm_entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRL, align 8, !tbaa !8 + br label %5 + +5: ; preds = %3, %0 + %6 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 6), align 2 + %7 = icmp eq i8 %6, 0 + %8 = select i1 %2, i1 %7, i1 false + br i1 %8, label %11, label %9 + +9: ; preds = %5 + %10 = load i64, i64* @time, align 8, !tbaa !8 + store i64 %10, i64* @tm_entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRLexited_BEREIT_FH_TUERMODUL_CTRL, align 8, !tbaa !8 + br label %11 + +11: ; preds = %5, %9 + %12 = load i64, i64* @sc_FH_TUERMODUL_CTRL_2375_2, align 8, !tbaa !8 + %13 = icmp eq i64 %12, 0 + %14 = load i64, i64* @time, align 8 + %15 = icmp eq i64 %14, %12 + %16 = select i1 %13, i1 true, i1 %15 + br i1 %16, label %18, label %17 + +17: ; preds = %11 + store i8 0, i8* @FH_TUERMODUL__MFHA_copy, align 1, !tbaa !5 + store i64 0, i64* @sc_FH_TUERMODUL_CTRL_2375_2, align 8, !tbaa !8 + br label %18 + +18: ; preds = %17, %11 + %19 = load i64, i64* @sc_FH_TUERMODUL_CTRL_2352_1, align 8, !tbaa !8 + %20 = icmp eq i64 %19, 0 + %21 = icmp eq i64 %14, %19 + %22 = select i1 %20, i1 true, i1 %21 + br i1 %22, label %24, label %23 + +23: ; preds = %18 + store i8 0, i8* @FH_TUERMODUL__MFHZ_copy, align 1, !tbaa !5 + store i64 0, i64* @sc_FH_TUERMODUL_CTRL_2352_1, align 8, !tbaa !8 + br label %24 + +24: ; preds = %23, %18 + %25 = load i64, i64* @sc_FH_TUERMODUL_CTRL_2329_1, align 8, !tbaa !8 + %26 = icmp eq i64 %25, 0 + %27 = icmp eq i64 %14, %25 + %28 = select i1 %26, i1 true, i1 %27 + br i1 %28, label %30, label %29 + +29: ; preds = %24 + store i8 0, i8* @FH_TUERMODUL__MFHZ_copy, align 1, !tbaa !5 + store i64 0, i64* @sc_FH_TUERMODUL_CTRL_2329_1, align 8, !tbaa !8 + br label %30 + +30: ; preds = %29, %24 + %31 = load i64, i64* @sc_FH_TUERMODUL_CTRL_1781_10, align 8, !tbaa !8 + %32 = icmp eq i64 %31, 0 + %33 = icmp eq i64 %14, %31 + %34 = select i1 %32, i1 true, i1 %33 + br i1 %34, label %36, label %35 + +35: ; preds = %30 + store i64 0, i64* @sc_FH_TUERMODUL_CTRL_1781_10, align 8, !tbaa !8 + br label %36 + +36: ; preds = %35, %30 + %37 = load i64, i64* @sc_FH_TUERMODUL_CTRL_1739_10, align 8, !tbaa !8 + %38 = icmp eq i64 %37, 0 + %39 = icmp eq i64 %14, %37 + %40 = select i1 %38, i1 true, i1 %39 + br i1 %40, label %42, label %41 + +41: ; preds = %36 + store i64 0, i64* @sc_FH_TUERMODUL_CTRL_1739_10, align 8, !tbaa !8 + br label %42 + +42: ; preds = %41, %36 + %43 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 0), align 16, !tbaa !5 + %44 = icmp eq i8 %43, 0 + br i1 %44, label %45, label %49 + +45: ; preds = %42 + %46 = load i32, i32* @BLOCK_ERKENNUNG_CTRL__N, align 4, !tbaa !10 + %47 = load i32, i32* @BLOCK_ERKENNUNG_CTRL__N_old, align 4, !tbaa !10 + %48 = icmp eq i32 %46, %47 + br i1 %48, label %50, label %49 + +49: ; preds = %45, %42 + store i64 %14, i64* @tm_entered_EINSCHALTSTROM_MESSEN_BLOCK_ERKENNUNG_CTRLch_BLOCK_ERKENNUNG_CTRL__N_copy, align 8, !tbaa !8 + br label %50 + +50: ; preds = %49, %45 + ret void +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind sspstrong uwtable willreturn writeonly +define dso_local void @init() local_unnamed_addr #1 { + store i64 0, i64* 
@tm_entered_EINSCHALTSTROM_MESSEN_BLOCK_ERKENNUNG_CTRLch_BLOCK_ERKENNUNG_CTRL__N_copy, align 8, !tbaa !8 + store i64 0, i64* @tm_entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRLexited_BEREIT_FH_TUERMODUL_CTRL, align 8, !tbaa !8 + store i64 0, i64* @tm_entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRL, align 8, !tbaa !8 + store i8 0, i8* @NICHT_INITIALISIERT_NICHT_INITIALISIERT_next_state, align 1, !tbaa !5 + store i8 0, i8* @ZENTRAL_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @MEC_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @B_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @A_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @WIEDERHOLSPERRE_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @INITIALISIERT_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @TIPP_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @MANUELL_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @OEFFNEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @SCHLIESSEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @FH_STEUERUNG_DUMMY_FH_STEUERUNG_DUMMY_next_state, align 1, !tbaa !5 + store i8 0, i8* @EINKLEMMSCHUTZ_CTRL_EINKLEMMSCHUTZ_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @BEWEGUNG_BLOCK_ERKENNUNG_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @BLOCK_ERKENNUNG_CTRL_BLOCK_ERKENNUNG_CTRL_next_state, align 1, !tbaa !5 + ret void +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind sspstrong uwtable willreturn +define dso_local void @generic_KINDERSICHERUNG_CTRL() local_unnamed_addr #0 { + %1 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 10), align 2, !tbaa !5 + %2 = icmp eq i8 %1, 0 + br i1 %2, label %108, label %3 + +3: ; preds = %0 + %4 = load i8, i8* @KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + %5 = sext i8 %4 to i32 + switch i32 %5, label %107 [ + i32 1, label %6 + i32 2, label %37 + i32 3, label %68 + ] + +6: ; preds = %3 + %7 = load i8, i8* @FH_TUERMODUL__SFHA_ZENTRAL, align 1, !tbaa !5 + %8 = icmp eq i8 %7, 0 + %9 = load i8, i8* @FH_TUERMODUL__SFHZ_ZENTRAL, align 1 + %10 = icmp eq i8 %9, 0 + %11 = select i1 %8, i1 %10, i1 false + br i1 %11, label %12, label %13 + +12: ; preds = %6 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 0, i8* @FH_TUERMODUL__SFHZ_copy, align 1, !tbaa !5 + store i8 0, i8* @FH_TUERMODUL__SFHA_copy, align 1, !tbaa !5 + store i8 3, i8* @KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @ZENTRAL_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + br label %108 + +13: ; preds = %6 + %14 = load i8, i8* @ZENTRAL_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + %15 = icmp eq i8 %14, 1 + br i1 %15, label %16, label %36 + +16: ; preds = %13 + %17 = load i8, i8* @FH_TUERMODUL__SFHA_ZENTRAL_old, align 1 + %18 = icmp ne i8 %17, 0 + %19 = select i1 %8, i1 true, i1 %18 + br i1 %19, label %21, label %20 + +20: ; preds = %16 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 1, i8* @FH_TUERMODUL__SFHA_copy, align 1, !tbaa !5 + store i8 1, i8* @ZENTRAL_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + br label %108 + +21: ; preds = %16 + %22 = load i8, i8* @FH_TUERMODUL__SFHZ_ZENTRAL_old, align 1 + %23 = icmp ne i8 %22, 0 + %24 = select i1 %10, i1 true, i1 %23 + br i1 %24, 
label %26, label %25 + +25: ; preds = %21 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 1, i8* @FH_TUERMODUL__SFHZ_copy, align 1, !tbaa !5 + store i8 1, i8* @ZENTRAL_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + br label %108 + +26: ; preds = %21 + %27 = icmp ne i8 %7, 0 + %28 = icmp eq i8 %17, 0 + %29 = select i1 %27, i1 true, i1 %28 + br i1 %29, label %31, label %30 + +30: ; preds = %26 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 0, i8* @FH_TUERMODUL__SFHA_copy, align 1, !tbaa !5 + store i8 1, i8* @ZENTRAL_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + br label %108 + +31: ; preds = %26 + %32 = icmp ne i8 %9, 0 + %33 = icmp eq i8 %22, 0 + %34 = select i1 %32, i1 true, i1 %33 + br i1 %34, label %108, label %35 + +35: ; preds = %31 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 0, i8* @FH_TUERMODUL__SFHZ_copy, align 1, !tbaa !5 + store i8 1, i8* @ZENTRAL_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + br label %108 + +36: ; preds = %13 + store i8 0, i8* @stable, align 1, !tbaa !5 + br label %108 + +37: ; preds = %3 + %38 = load i8, i8* @FH_TUERMODUL__SFHA_MEC, align 1, !tbaa !5 + %39 = icmp eq i8 %38, 0 + %40 = load i8, i8* @FH_TUERMODUL__SFHZ_MEC, align 1 + %41 = icmp eq i8 %40, 0 + %42 = select i1 %39, i1 %41, i1 false + br i1 %42, label %43, label %44 + +43: ; preds = %37 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 0, i8* @FH_TUERMODUL__SFHZ_copy, align 1, !tbaa !5 + store i8 0, i8* @FH_TUERMODUL__SFHA_copy, align 1, !tbaa !5 + store i8 3, i8* @KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @MEC_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + br label %108 + +44: ; preds = %37 + %45 = load i8, i8* @MEC_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + %46 = icmp eq i8 %45, 1 + br i1 %46, label %47, label %67 + +47: ; preds = %44 + %48 = load i8, i8* @FH_TUERMODUL__SFHA_MEC_old, align 1 + %49 = icmp ne i8 %48, 0 + %50 = select i1 %39, i1 true, i1 %49 + br i1 %50, label %52, label %51 + +51: ; preds = %47 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 1, i8* @FH_TUERMODUL__SFHA_copy, align 1, !tbaa !5 + store i8 1, i8* @MEC_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + br label %108 + +52: ; preds = %47 + %53 = load i8, i8* @FH_TUERMODUL__SFHZ_MEC_old, align 1 + %54 = icmp ne i8 %53, 0 + %55 = select i1 %41, i1 true, i1 %54 + br i1 %55, label %57, label %56 + +56: ; preds = %52 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 1, i8* @FH_TUERMODUL__SFHZ_copy, align 1, !tbaa !5 + store i8 1, i8* @MEC_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + br label %108 + +57: ; preds = %52 + %58 = icmp ne i8 %38, 0 + %59 = icmp eq i8 %48, 0 + %60 = select i1 %58, i1 true, i1 %59 + br i1 %60, label %62, label %61 + +61: ; preds = %57 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 0, i8* @FH_TUERMODUL__SFHA_copy, align 1, !tbaa !5 + store i8 1, i8* @MEC_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + br label %108 + +62: ; preds = %57 + %63 = icmp ne i8 %40, 0 + %64 = icmp eq i8 %53, 0 + %65 = select i1 %63, i1 true, i1 %64 + br i1 %65, label %108, label %66 + +66: ; preds = %62 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 0, i8* @FH_TUERMODUL__SFHZ_copy, align 1, !tbaa !5 + store i8 1, i8* @MEC_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + br label %108 + +67: ; preds = %44 + store i8 0, i8* @stable, align 1, !tbaa !5 + br label %108 + +68: ; preds = %3 + %69 = load i8, i8* @FH_TUERMODUL__KL_50, align 1, !tbaa !5 + %70 = icmp ne 
i8 %69, 0 + %71 = load i8, i8* @FH_TUERMODUL__SFHZ_MEC, align 1 + %72 = icmp eq i8 %71, 0 + %73 = select i1 %70, i1 true, i1 %72 + %74 = load i8, i8* @FH_TUERMODUL__SFHA_MEC, align 1 + %75 = icmp eq i8 %74, 0 + %76 = select i1 %73, i1 true, i1 %75 + br i1 %76, label %78, label %77 + +77: ; preds = %68 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 1, i8* @FH_TUERMODUL__SFHZ_copy, align 1, !tbaa !5 + store i8 1, i8* @FH_TUERMODUL__SFHA_copy, align 1, !tbaa !5 + store i8 2, i8* @KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + br label %108 + +78: ; preds = %68 + %79 = icmp eq i8 %69, 0 + br i1 %79, label %80, label %84 + +80: ; preds = %78 + %81 = icmp ne i8 %74, 0 + %82 = select i1 %72, i1 true, i1 %81 + br i1 %82, label %84, label %83 + +83: ; preds = %80 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 1, i8* @FH_TUERMODUL__SFHZ_copy, align 1, !tbaa !5 + store i8 2, i8* @KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + br label %108 + +84: ; preds = %80, %78 + %85 = icmp ne i8 %71, 0 + %86 = select i1 %70, i1 true, i1 %85 + %87 = select i1 %86, i1 true, i1 %75 + br i1 %87, label %89, label %88 + +88: ; preds = %84 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 1, i8* @FH_TUERMODUL__SFHA_copy, align 1, !tbaa !5 + store i8 2, i8* @KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + br label %108 + +89: ; preds = %84 + %90 = load i8, i8* @FH_TUERMODUL__SFHZ_ZENTRAL, align 1, !tbaa !5 + %91 = icmp eq i8 %90, 0 + br i1 %91, label %92, label %97 + +92: ; preds = %89 + %93 = load i8, i8* @FH_TUERMODUL__SFHA_ZENTRAL, align 1, !tbaa !5 + %94 = icmp eq i8 %93, 0 + %95 = select i1 %94, i1 true, i1 %70 + br i1 %95, label %97, label %96 + +96: ; preds = %92 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 1, i8* @FH_TUERMODUL__SFHA_copy, align 1, !tbaa !5 + store i8 1, i8* @KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + br label %108 + +97: ; preds = %92, %89 + %98 = load i8, i8* @FH_TUERMODUL__SFHA_ZENTRAL, align 1 + %99 = icmp eq i8 %98, 0 + %100 = select i1 %91, i1 true, i1 %99 + br i1 %100, label %102, label %101 + +101: ; preds = %97 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 1, i8* @FH_TUERMODUL__SFHA_copy, align 1, !tbaa !5 + store i8 1, i8* @FH_TUERMODUL__SFHZ_copy, align 1, !tbaa !5 + store i8 1, i8* @KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + br label %108 + +102: ; preds = %97 + %103 = icmp ne i8 %98, 0 + %104 = select i1 %91, i1 true, i1 %103 + %105 = select i1 %104, i1 true, i1 %70 + br i1 %105, label %108, label %106 + +106: ; preds = %102 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 1, i8* @FH_TUERMODUL__SFHZ_copy, align 1, !tbaa !5 + store i8 1, i8* @KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + br label %108 + +107: ; preds = %3 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 3, i8* @KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + br label %108 + +108: ; preds = %12, %43, %77, %83, %88, %96, %101, %106, %107, %31, %36, %35, %30, %25, %20, %62, %67, %66, %61, %56, %51, %102, %0 + ret void +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind sspstrong uwtable willreturn +define dso_local void @generic_FH_TUERMODUL_CTRL() local_unnamed_addr #0 { + %1 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 13), align 1, !tbaa !5 + %2 = icmp eq i8 %1, 0 + br i1 %2, label %3, label %11 + +3: ; preds = %0 
+ %4 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 15), align 1, !tbaa !5 + %5 = icmp eq i8 %4, 0 + %6 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 14), align 2 + %7 = icmp ne i8 %6, 0 + %8 = select i1 %5, i1 true, i1 %7 + br i1 %8, label %10, label %9 + +9: ; preds = %3 + store i8 0, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 4), align 4, !tbaa !5 + store i8 0, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 6), align 2, !tbaa !5 + br label %10 + +10: ; preds = %9, %3 + br i1 %2, label %260, label %11 + +11: ; preds = %0, %10 + %12 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 10), align 2, !tbaa !5 + %13 = icmp eq i8 %12, 0 + br i1 %13, label %14, label %15 + +14: ; preds = %11 + store i8 3, i8* @KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + br label %15 + +15: ; preds = %14, %11 + store i8 0, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 11), align 1, !tbaa !5 + %16 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 19), align 1, !tbaa !5 + %17 = icmp eq i8 %16, 0 + br i1 %17, label %18, label %19 + +18: ; preds = %15 + store i8 0, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 0), align 16, !tbaa !5 + store i8 1, i8* @BLOCK_ERKENNUNG_CTRL_BLOCK_ERKENNUNG_CTRL_next_state, align 1, !tbaa !5 + br label %19 + +19: ; preds = %18, %15 + store i8 0, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 20), align 4, !tbaa !5 + store i8 1, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 11), align 1, !tbaa !5 + store i8 1, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 20), align 4, !tbaa !5 + %20 = load i8, i8* @B_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + %21 = sext i8 %20 to i32 + switch i32 %21, label %209 [ + i32 1, label %22 + i32 2, label %29 + i32 3, label %60 + ] + +22: ; preds = %19 + %23 = load i32, i32* @FH_TUERMODUL_CTRL__N, align 4, !tbaa !10 + %24 = icmp ne i32 %23, 59 + %25 = load i32, i32* @FH_TUERMODUL_CTRL__N_old, align 4 + %26 = icmp eq i32 %25, 59 + %27 = select i1 %24, i1 true, i1 %26 + br i1 %27, label %210, label %28 + +28: ; preds = %22 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 3, i8* @B_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 3, i8* @INITIALISIERT_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + br label %210 + +29: ; preds = %19 + %30 = load i8, i8* @FH_TUERMODUL__BLOCK, align 1, !tbaa !5 + %31 = icmp eq i8 %30, 0 + %32 = load i8, i8* @FH_TUERMODUL__BLOCK_old, align 1 + %33 = icmp ne i8 %32, 0 + %34 = select i1 %31, i1 true, i1 %33 + %35 = load i8, i8* @FH_TUERMODUL__MFHZ, align 1 + %36 = icmp eq i8 %35, 0 + %37 = select i1 %34, i1 true, i1 %36 + br i1 %37, label %40, label %38 + +38: ; preds = %29 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 0, i8* @FH_TUERMODUL__MFHZ_copy, align 1, !tbaa !5 + %39 = load i64, i64* @time, align 8, !tbaa !8 + store i64 %39, i64* @sc_FH_TUERMODUL_CTRL_2329_1, align 8, !tbaa !8 + store i8 3, i8* @B_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 3, i8* @INITIALISIERT_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + br label %210 + +40: ; preds = %29 + %41 = load i8, i8* @NICHT_INITIALISIERT_NICHT_INITIALISIERT_next_state, align 1, !tbaa !5 + %42 = sext i8 %41 to i32 + switch i32 %42, label %59 [ + i32 1, label %43 + i32 2, label %47 + i32 3, label 
%51 + ] + +43: ; preds = %40 + %44 = load i8, i8* @FH_TUERMODUL__SFHZ, align 1, !tbaa !5 + %45 = icmp eq i8 %44, 0 + br i1 %45, label %46, label %210 + +46: ; preds = %43 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 0, i8* @FH_TUERMODUL__MFHZ_copy, align 1, !tbaa !5 + store i8 3, i8* @NICHT_INITIALISIERT_NICHT_INITIALISIERT_next_state, align 1, !tbaa !5 + br label %210 + +47: ; preds = %40 + %48 = load i8, i8* @FH_TUERMODUL__SFHA, align 1, !tbaa !5 + %49 = icmp eq i8 %48, 0 + br i1 %49, label %50, label %210 + +50: ; preds = %47 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 0, i8* @FH_TUERMODUL__MFHA_copy, align 1, !tbaa !5 + store i8 3, i8* @NICHT_INITIALISIERT_NICHT_INITIALISIERT_next_state, align 1, !tbaa !5 + br label %210 + +51: ; preds = %40 + %52 = load i8, i8* @FH_TUERMODUL__SFHA, align 1, !tbaa !5 + %53 = icmp eq i8 %52, 0 + br i1 %53, label %55, label %54 + +54: ; preds = %51 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 1, i8* @FH_TUERMODUL__MFHA_copy, align 1, !tbaa !5 + store i8 2, i8* @NICHT_INITIALISIERT_NICHT_INITIALISIERT_next_state, align 1, !tbaa !5 + br label %210 + +55: ; preds = %51 + %56 = load i8, i8* @FH_TUERMODUL__SFHZ, align 1, !tbaa !5 + %57 = icmp eq i8 %56, 0 + br i1 %57, label %210, label %58 + +58: ; preds = %55 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 1, i8* @FH_TUERMODUL__MFHZ_copy, align 1, !tbaa !5 + store i8 1, i8* @NICHT_INITIALISIERT_NICHT_INITIALISIERT_next_state, align 1, !tbaa !5 + br label %210 + +59: ; preds = %40 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 3, i8* @NICHT_INITIALISIERT_NICHT_INITIALISIERT_next_state, align 1, !tbaa !5 + br label %210 + +60: ; preds = %19 + %61 = load i32, i32* @FH_TUERMODUL_CTRL__N, align 4, !tbaa !10 + %62 = icmp sgt i32 %61, 60 + %63 = load i32, i32* @FH_TUERMODUL_CTRL__N_old, align 4 + %64 = icmp slt i32 %63, 61 + %65 = select i1 %62, i1 %64, i1 false + %66 = load i8, i8* @FH_TUERMODUL_CTRL__INREVERS1, align 1 + %67 = icmp eq i8 %66, 0 + %68 = select i1 %65, i1 %67, i1 false + %69 = load i8, i8* @FH_TUERMODUL_CTRL__INREVERS2, align 1 + %70 = icmp eq i8 %69, 0 + %71 = select i1 %68, i1 %70, i1 false + br i1 %71, label %72, label %73 + +72: ; preds = %60 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 0, i8* @FH_TUERMODUL__MFHZ_copy, align 1, !tbaa !5 + store i8 0, i8* @FH_TUERMODUL__MFHA_copy, align 1, !tbaa !5 + store i8 1, i8* @B_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + br label %210 + +73: ; preds = %60 + %74 = load i8, i8* @FH_TUERMODUL__BLOCK, align 1, !tbaa !5 + %75 = icmp eq i8 %74, 0 + %76 = load i8, i8* @FH_TUERMODUL__BLOCK_old, align 1 + %77 = icmp ne i8 %76, 0 + %78 = select i1 %75, i1 true, i1 %77 + %79 = load i8, i8* @FH_TUERMODUL__MFHA, align 1 + %80 = icmp eq i8 %79, 0 + %81 = select i1 %78, i1 true, i1 %80 + br i1 %81, label %84, label %82 + +82: ; preds = %73 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 0, i8* @FH_TUERMODUL__MFHA_copy, align 1, !tbaa !5 + %83 = load i64, i64* @time, align 8, !tbaa !8 + store i64 %83, i64* @sc_FH_TUERMODUL_CTRL_2375_2, align 8, !tbaa !8 + store i8 2, i8* @B_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 3, i8* @NICHT_INITIALISIERT_NICHT_INITIALISIERT_next_state, align 1, !tbaa !5 + br label %210 + +84: ; preds = %73 + %85 = load i8, i8* @FH_TUERMODUL__MFHZ, align 1 + %86 = icmp eq i8 %85, 0 + %87 = select i1 %78, i1 true, i1 %86 + br i1 %87, label %90, label %88 + +88: ; preds = %84 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 0, i8* 
@FH_TUERMODUL__MFHZ_copy, align 1, !tbaa !5 + %89 = load i64, i64* @time, align 8, !tbaa !8 + store i64 %89, i64* @sc_FH_TUERMODUL_CTRL_2352_1, align 8, !tbaa !8 + store i8 2, i8* @B_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 3, i8* @NICHT_INITIALISIERT_NICHT_INITIALISIERT_next_state, align 1, !tbaa !5 + br label %210 + +90: ; preds = %84 + %91 = load i8, i8* @INITIALISIERT_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + %92 = sext i8 %91 to i32 + switch i32 %92, label %208 [ + i32 1, label %93 + i32 2, label %128 + i32 3, label %189 + ] + +93: ; preds = %90 + %94 = load i32, i32* @FH_TUERMODUL__POSITION, align 4, !tbaa !10 + %95 = icmp sgt i32 %94, 404 + br i1 %95, label %96, label %97 + +96: ; preds = %93 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 0, i8* @FH_TUERMODUL__MFHA_copy, align 1, !tbaa !5 + store i8 3, i8* @INITIALISIERT_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + br label %210 + +97: ; preds = %93 + %98 = load i8, i8* @OEFFNEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + %99 = sext i8 %98 to i32 + switch i32 %99, label %127 [ + i32 1, label %100 + i32 2, label %113 + ] + +100: ; preds = %97 + %101 = load i8, i8* @FH_TUERMODUL__SFHZ, align 1, !tbaa !5 + %102 = icmp eq i8 %101, 0 + %103 = load i8, i8* @FH_TUERMODUL__SFHZ_old, align 1 + %104 = icmp ne i8 %103, 0 + %105 = select i1 %102, i1 true, i1 %104 + br i1 %105, label %106, label %112 + +106: ; preds = %100 + %107 = load i8, i8* @FH_TUERMODUL__SFHA, align 1, !tbaa !5 + %108 = icmp eq i8 %107, 0 + %109 = load i8, i8* @FH_TUERMODUL__SFHA_old, align 1 + %110 = icmp ne i8 %109, 0 + %111 = select i1 %108, i1 true, i1 %110 + br i1 %111, label %210, label %112 + +112: ; preds = %106, %100 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 0, i8* @FH_TUERMODUL__MFHA_copy, align 1, !tbaa !5 + store i8 3, i8* @INITIALISIERT_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @OEFFNEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + br label %210 + +113: ; preds = %97 + %114 = load i8, i8* @FH_TUERMODUL__SFHZ, align 1, !tbaa !5 + %115 = icmp eq i8 %114, 0 + %116 = load i8, i8* @FH_TUERMODUL__SFHZ_old, align 1 + %117 = icmp ne i8 %116, 0 + %118 = select i1 %115, i1 true, i1 %117 + br i1 %118, label %120, label %119 + +119: ; preds = %113 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 1, i8* @OEFFNEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + br label %210 + +120: ; preds = %113 + %121 = load i8, i8* @FH_TUERMODUL__SFHA, align 1, !tbaa !5 + %122 = icmp ne i8 %121, 0 + %123 = load i8, i8* @FH_TUERMODUL__SFHA_old, align 1 + %124 = icmp eq i8 %123, 0 + %125 = select i1 %122, i1 true, i1 %124 + br i1 %125, label %210, label %126 + +126: ; preds = %120 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 0, i8* @FH_TUERMODUL__MFHA_copy, align 1, !tbaa !5 + store i8 3, i8* @INITIALISIERT_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @OEFFNEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + br label %210 + +127: ; preds = %97 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 2, i8* @OEFFNEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + br label %210 + +128: ; preds = %90 + %129 = load i32, i32* @FH_TUERMODUL__POSITION, align 4, !tbaa !10 + %130 = icmp slt i32 %129, 1 + br i1 %130, label %131, label %132 + +131: ; preds = %128 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 0, i8* @FH_TUERMODUL__MFHZ_copy, align 1, !tbaa !5 + store i8 3, i8* @INITIALISIERT_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + br label %210 + +132: ; 
preds = %128 + %133 = load i8, i8* @SCHLIESSEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + %134 = sext i8 %133 to i32 + switch i32 %134, label %188 [ + i32 1, label %135 + i32 2, label %161 + ] + +135: ; preds = %132 + %136 = load i8, i8* @FH_TUERMODUL__SFHA, align 1, !tbaa !5 + %137 = icmp eq i8 %136, 0 + %138 = load i8, i8* @FH_TUERMODUL__SFHA_old, align 1 + %139 = icmp ne i8 %138, 0 + %140 = select i1 %137, i1 true, i1 %139 + br i1 %140, label %141, label %147 + +141: ; preds = %135 + %142 = load i8, i8* @FH_TUERMODUL__SFHZ, align 1, !tbaa !5 + %143 = icmp eq i8 %142, 0 + %144 = load i8, i8* @FH_TUERMODUL__SFHZ_old, align 1 + %145 = icmp ne i8 %144, 0 + %146 = select i1 %143, i1 true, i1 %145 + br i1 %146, label %148, label %147 + +147: ; preds = %141, %135 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 0, i8* @FH_TUERMODUL__MFHZ_copy, align 1, !tbaa !5 + store i8 3, i8* @INITIALISIERT_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + br label %210 + +148: ; preds = %141 + %149 = load i8, i8* @TIPP_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + %150 = sext i8 %149 to i32 + switch i32 %150, label %160 [ + i32 1, label %151 + i32 2, label %155 + ] + +151: ; preds = %148 + store i8 0, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 23), align 1, !tbaa !5 + %152 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 22), align 2, !tbaa !5 + %153 = icmp eq i8 %152, 0 + br i1 %153, label %210, label %154 + +154: ; preds = %151 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 1, i8* @FH_TUERMODUL__MFHZ_copy, align 1, !tbaa !5 + store i8 0, i8* @FH_TUERMODUL_CTRL__INREVERS2_copy, align 1, !tbaa !5 + store i8 2, i8* @TIPP_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @FH_TUERMODUL__MFHA_copy, align 1, !tbaa !5 + store i8 1, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 17), align 1, !tbaa !5 + br label %210 + +155: ; preds = %148 + %156 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 24), align 8, !tbaa !5 + %157 = icmp eq i8 %156, 0 + br i1 %157, label %210, label %158 + +158: ; preds = %155 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 1, i8* @FH_TUERMODUL_CTRL__INREVERS2_copy, align 1, !tbaa !5 + store i8 1, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 23), align 1, !tbaa !5 + store i8 1, i8* @TIPP_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 17), align 1, !tbaa !5 + store i8 0, i8* @FH_TUERMODUL__MFHZ_copy, align 1, !tbaa !5 + %159 = load i64, i64* @time, align 8, !tbaa !8 + store i64 %159, i64* @sc_FH_TUERMODUL_CTRL_1781_10, align 8, !tbaa !8 + store i8 1, i8* @FH_TUERMODUL__MFHA_copy, align 1, !tbaa !5 + br label %210 + +160: ; preds = %148 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 2, i8* @TIPP_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 1, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 17), align 1, !tbaa !5 + br label %210 + +161: ; preds = %132 + %162 = load i8, i8* @FH_TUERMODUL__SFHZ, align 1, !tbaa !5 + %163 = icmp ne i8 %162, 0 + %164 = load i8, i8* @FH_TUERMODUL__SFHZ_old, align 1 + %165 = icmp eq i8 %164, 0 + %166 = select i1 %163, i1 true, i1 %165 + br i1 %166, label %168, label %167 + +167: ; preds = %161 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 0, i8* @FH_TUERMODUL__MFHZ_copy, align 1, !tbaa !5 
+ store i8 3, i8* @INITIALISIERT_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + br label %210 + +168: ; preds = %161 + %169 = load i8, i8* @MANUELL_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + %170 = sext i8 %169 to i32 + switch i32 %170, label %187 [ + i32 1, label %171 + i32 2, label %175 + ] + +171: ; preds = %168 + store i8 0, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 23), align 1, !tbaa !5 + %172 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 22), align 2, !tbaa !5 + %173 = icmp eq i8 %172, 0 + br i1 %173, label %210, label %174 + +174: ; preds = %171 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 0, i8* @FH_TUERMODUL_CTRL__INREVERS1_copy, align 1, !tbaa !5 + store i8 2, i8* @MANUELL_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @FH_TUERMODUL__MFHA_copy, align 1, !tbaa !5 + store i8 1, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 17), align 1, !tbaa !5 + store i8 1, i8* @FH_TUERMODUL__MFHZ_copy, align 1, !tbaa !5 + br label %210 + +175: ; preds = %168 + %176 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 24), align 8, !tbaa !5 + %177 = icmp eq i8 %176, 0 + br i1 %177, label %180, label %178 + +178: ; preds = %175 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 0, i8* @FH_TUERMODUL__MFHZ_copy, align 1, !tbaa !5 + store i8 1, i8* @FH_TUERMODUL_CTRL__INREVERS1_copy, align 1, !tbaa !5 + store i8 1, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 23), align 1, !tbaa !5 + store i8 1, i8* @MANUELL_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 17), align 1, !tbaa !5 + %179 = load i64, i64* @time, align 8, !tbaa !8 + store i64 %179, i64* @sc_FH_TUERMODUL_CTRL_1739_10, align 8, !tbaa !8 + store i8 1, i8* @FH_TUERMODUL__MFHA_copy, align 1, !tbaa !5 + br label %210 + +180: ; preds = %175 + %181 = load i8, i8* @FH_TUERMODUL__SFHA, align 1, !tbaa !5 + %182 = icmp eq i8 %181, 0 + %183 = load i8, i8* @FH_TUERMODUL__SFHA_old, align 1 + %184 = icmp ne i8 %183, 0 + %185 = select i1 %182, i1 true, i1 %184 + br i1 %185, label %210, label %186 + +186: ; preds = %180 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 1, i8* @SCHLIESSEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @MANUELL_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + br label %210 + +187: ; preds = %168 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 2, i8* @MANUELL_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 1, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 17), align 1, !tbaa !5 + store i8 1, i8* @FH_TUERMODUL__MFHZ_copy, align 1, !tbaa !5 + br label %210 + +188: ; preds = %132 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 2, i8* @SCHLIESSEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 2, i8* @MANUELL_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 1, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 17), align 1, !tbaa !5 + store i8 1, i8* @FH_TUERMODUL__MFHZ_copy, align 1, !tbaa !5 + br label %210 + +189: ; preds = %90 + %190 = load i8, i8* @FH_TUERMODUL__SFHZ, align 1, !tbaa !5 + %191 = icmp ne i8 %190, 0 + %192 = load i8, i8* @FH_TUERMODUL__SFHZ_old, align 1 + %193 = icmp eq i8 %192, 0 + %194 = select i1 %191, i1 %193, i1 false + %195 = load i32, i32* 
@FH_TUERMODUL__POSITION, align 4 + %196 = icmp sgt i32 %195, 0 + %197 = select i1 %194, i1 %196, i1 false + br i1 %197, label %198, label %199 + +198: ; preds = %189 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 2, i8* @INITIALISIERT_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 2, i8* @SCHLIESSEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 2, i8* @MANUELL_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 1, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 17), align 1, !tbaa !5 + store i8 1, i8* @FH_TUERMODUL__MFHZ_copy, align 1, !tbaa !5 + br label %210 + +199: ; preds = %189 + %200 = load i8, i8* @FH_TUERMODUL__SFHA, align 1, !tbaa !5 + %201 = icmp ne i8 %200, 0 + %202 = load i8, i8* @FH_TUERMODUL__SFHA_old, align 1 + %203 = icmp eq i8 %202, 0 + %204 = select i1 %201, i1 %203, i1 false + %205 = icmp slt i32 %195, 405 + %206 = select i1 %204, i1 %205, i1 false + br i1 %206, label %207, label %210 + +207: ; preds = %199 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 1, i8* @FH_TUERMODUL__MFHA_copy, align 1, !tbaa !5 + store i8 1, i8* @INITIALISIERT_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 2, i8* @OEFFNEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + br label %210 + +208: ; preds = %90 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 3, i8* @INITIALISIERT_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + br label %210 + +209: ; preds = %19 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 2, i8* @B_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + br label %210 + +210: ; preds = %96, %131, %198, %207, %208, %120, %106, %127, %126, %119, %112, %174, %178, %186, %187, %171, %180, %154, %158, %160, %151, %155, %188, %167, %147, %199, %46, %50, %54, %58, %59, %43, %47, %55, %22, %209, %88, %82, %72, %38, %28 + %211 = load i8, i8* @A_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + %212 = icmp eq i8 %211, 1 + br i1 %212, label %213, label %255 + +213: ; preds = %210 + store i8 0, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 5), align 1, !tbaa !5 + %214 = load i8, i8* @step, align 1, !tbaa !5 + %215 = icmp eq i8 %214, 1 + %216 = load i64, i64* @tm_entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRLexited_BEREIT_FH_TUERMODUL_CTRL, align 8 + %217 = icmp ne i64 %216, 0 + %218 = select i1 %215, i1 %217, i1 false + br i1 %218, label %219, label %232 + +219: ; preds = %213 + %220 = load i64, i64* @time, align 8, !tbaa !8 + %221 = sub i64 %220, %216 + %222 = icmp eq i64 %221, 1 + br i1 %222, label %223, label %232 + +223: ; preds = %219 + %224 = load i8, i8* @FH_TUERMODUL__MFHZ, align 1, !tbaa !5 + %225 = icmp eq i8 %224, 0 + %226 = load i8, i8* @FH_TUERMODUL__MFHA, align 1 + %227 = icmp eq i8 %226, 0 + %228 = select i1 %225, i1 %227, i1 false + br i1 %228, label %232, label %229 + +229: ; preds = %223 + store i8 0, i8* @stable, align 1, !tbaa !5 + %230 = load i32, i32* @FH_TUERMODUL_CTRL__N, align 4, !tbaa !10 + %231 = add nsw i32 %230, 1 + store i32 %231, i32* @FH_TUERMODUL_CTRL__N, align 4, !tbaa !10 + store i8 1, i8* @A_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 1, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 5), align 1, !tbaa !5 + br label %256 + +232: ; preds = %223, %219, %213 + %233 = load i8, i8* @WIEDERHOLSPERRE_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + %234 = icmp eq i8 %233, 1 + br i1 %234, label %235, label %254 + +235: ; preds = %232 + %236 = load i64, i64* 
@tm_entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRL, align 8 + %237 = icmp ne i64 %236, 0 + %238 = select i1 %215, i1 %237, i1 false + br i1 %238, label %239, label %257 + +239: ; preds = %235 + %240 = load i64, i64* @time, align 8, !tbaa !8 + %241 = sub i64 %240, %236 + %242 = icmp eq i64 %241, 3 + %243 = load i8, i8* @FH_TUERMODUL__MFHZ, align 1 + %244 = icmp eq i8 %243, 0 + %245 = select i1 %242, i1 %244, i1 false + br i1 %245, label %246, label %257 + +246: ; preds = %239 + %247 = load i8, i8* @FH_TUERMODUL__MFHA, align 1, !tbaa !5 + %248 = icmp eq i8 %247, 0 + %249 = load i32, i32* @FH_TUERMODUL_CTRL__N, align 4 + %250 = icmp sgt i32 %249, 0 + %251 = select i1 %248, i1 %250, i1 false + br i1 %251, label %252, label %257 + +252: ; preds = %246 + store i8 0, i8* @stable, align 1, !tbaa !5 + %253 = add nsw i32 %249, -1 + store i32 %253, i32* @FH_TUERMODUL_CTRL__N, align 4, !tbaa !10 + br label %256 + +254: ; preds = %232 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 1, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 5), align 1, !tbaa !5 + br label %256 + +255: ; preds = %210 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i32 0, i32* @FH_TUERMODUL_CTRL__N, align 4, !tbaa !10 + store i8 1, i8* @A_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 1, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 5), align 1, !tbaa !5 + br label %256 + +256: ; preds = %229, %255, %254, %252 + store i8 1, i8* @WIEDERHOLSPERRE_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + br label %257 + +257: ; preds = %256, %246, %239, %235 + %258 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 4), align 4, !tbaa !5 + store i8 %258, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 5), align 1, !tbaa !5 + %259 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 6), align 2, !tbaa !5 + store i8 %259, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 7), align 1, !tbaa !5 + br label %260 + +260: ; preds = %257, %10 + ret void +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind sspstrong uwtable willreturn +define dso_local void @generic_EINKLEMMSCHUTZ_CTRL() local_unnamed_addr #0 { + %1 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 16), align 16, !tbaa !5 + %2 = icmp eq i8 %1, 0 + br i1 %2, label %29, label %3 + +3: ; preds = %0 + %4 = load i8, i8* @EINKLEMMSCHUTZ_CTRL_EINKLEMMSCHUTZ_CTRL_next_state, align 1, !tbaa !5 + %5 = sext i8 %4 to i32 + switch i32 %5, label %26 [ + i32 1, label %6 + i32 2, label %19 + ] + +6: ; preds = %3 + %7 = load i8, i8* @FH_TUERMODUL__EKS_LEISTE_AKTIV, align 1, !tbaa !5 + %8 = icmp eq i8 %7, 0 + %9 = load i8, i8* @FH_TUERMODUL__EKS_LEISTE_AKTIV_old, align 1 + %10 = icmp ne i8 %9, 0 + %11 = select i1 %8, i1 true, i1 %10 + br i1 %11, label %29, label %12 + +12: ; preds = %6 + %13 = load i8, i8* @FH_TUERMODUL__SFHZ, align 1, !tbaa !5 + %14 = icmp eq i8 %13, 0 + %15 = load i8, i8* @FH_TUERMODUL__SFHA, align 1 + %16 = icmp eq i8 %15, 0 + %17 = select i1 %14, i1 true, i1 %16 + br i1 %17, label %18, label %29 + +18: ; preds = %12 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 1, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 24), align 8, !tbaa !5 + br label %27 + +19: ; preds = %3 + store i8 0, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 24), align 8, !tbaa !5 + %20 = load i8, i8* @FH_TUERMODUL__EKS_LEISTE_AKTIV, 
align 1, !tbaa !5 + %21 = icmp ne i8 %20, 0 + %22 = load i8, i8* @FH_TUERMODUL__EKS_LEISTE_AKTIV_old, align 1 + %23 = icmp eq i8 %22, 0 + %24 = select i1 %21, i1 true, i1 %23 + br i1 %24, label %29, label %25 + +25: ; preds = %19 + store i8 0, i8* @stable, align 1, !tbaa !5 + br label %27 + +26: ; preds = %3 + store i8 0, i8* @stable, align 1, !tbaa !5 + br label %27 + +27: ; preds = %26, %25, %18 + %28 = phi i8 [ 2, %18 ], [ 1, %25 ], [ 1, %26 ] + store i8 %28, i8* @EINKLEMMSCHUTZ_CTRL_EINKLEMMSCHUTZ_CTRL_next_state, align 1, !tbaa !5 + br label %29 + +29: ; preds = %27, %12, %6, %19, %0 + ret void +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind sspstrong uwtable willreturn +define dso_local void @generic_BLOCK_ERKENNUNG_CTRL() local_unnamed_addr #0 { + %1 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 19), align 1, !tbaa !5 + %2 = icmp eq i8 %1, 0 + br i1 %2, label %3, label %11 + +3: ; preds = %0 + %4 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 21), align 1, !tbaa !5 + %5 = icmp eq i8 %4, 0 + %6 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 20), align 4 + %7 = icmp ne i8 %6, 0 + %8 = select i1 %5, i1 true, i1 %7 + br i1 %8, label %10, label %9 + +9: ; preds = %3 + store i8 0, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 0), align 16, !tbaa !5 + br label %10 + +10: ; preds = %9, %3 + br i1 %2, label %52, label %11 + +11: ; preds = %0, %10 + %12 = load i8, i8* @BLOCK_ERKENNUNG_CTRL_BLOCK_ERKENNUNG_CTRL_next_state, align 1, !tbaa !5 + %13 = sext i8 %12 to i32 + switch i32 %13, label %51 [ + i32 1, label %14 + i32 2, label %21 + ] + +14: ; preds = %11 + %15 = load i32, i32* @FH_TUERMODUL__I_EIN, align 4, !tbaa !10 + %16 = load i32, i32* @FH_TUERMODUL__I_EIN_old, align 4, !tbaa !10 + %17 = icmp ne i32 %15, %16 + %18 = icmp sgt i32 %15, 0 + %19 = and i1 %18, %17 + br i1 %19, label %20, label %52 + +20: ; preds = %14 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 0, i8* @FH_TUERMODUL__BLOCK_copy, align 1, !tbaa !5 + store i8 2, i8* @BLOCK_ERKENNUNG_CTRL_BLOCK_ERKENNUNG_CTRL_next_state, align 1, !tbaa !5 + store i32 0, i32* @BLOCK_ERKENNUNG_CTRL__N, align 4, !tbaa !10 + store i32 2, i32* @BLOCK_ERKENNUNG_CTRL__I_EIN_MAX, align 4, !tbaa !10 + store i8 3, i8* @BEWEGUNG_BLOCK_ERKENNUNG_CTRL_next_state, align 1, !tbaa !5 + store i8 1, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 0), align 16, !tbaa !5 + br label %52 + +21: ; preds = %11 + %22 = load i8, i8* @FH_TUERMODUL__MFHA, align 1, !tbaa !5 + %23 = icmp ne i8 %22, 0 + %24 = load i8, i8* @FH_TUERMODUL__MFHA_old, align 1 + %25 = icmp eq i8 %24, 0 + %26 = select i1 %23, i1 true, i1 %25 + br i1 %26, label %27, label %33 + +27: ; preds = %21 + %28 = load i8, i8* @FH_TUERMODUL__MFHZ, align 1, !tbaa !5 + %29 = icmp ne i8 %28, 0 + %30 = load i8, i8* @FH_TUERMODUL__MFHZ_old, align 1 + %31 = icmp eq i8 %30, 0 + %32 = select i1 %29, i1 true, i1 %31 + br i1 %32, label %34, label %33 + +33: ; preds = %27, %21 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 1, i8* @BLOCK_ERKENNUNG_CTRL_BLOCK_ERKENNUNG_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @BEWEGUNG_BLOCK_ERKENNUNG_CTRL_next_state, align 1, !tbaa !5 + br label %52 + +34: ; preds = %27 + %35 = load i8, i8* @BEWEGUNG_BLOCK_ERKENNUNG_CTRL_next_state, align 1, !tbaa !5 + %36 = sext i8 %35 to i32 + switch i32 %36, label %50 [ + i32 1, label %52 + i32 2, label %37 + i32 3, label %43 + ] + +37: ; preds = 
%34 + %38 = load i32, i32* @FH_TUERMODUL__I_EIN, align 4, !tbaa !10 + %39 = load i32, i32* @BLOCK_ERKENNUNG_CTRL__I_EIN_MAX, align 4, !tbaa !10 + %40 = add nsw i32 %39, -2 + %41 = icmp sgt i32 %38, %40 + br i1 %41, label %42, label %52 + +42: ; preds = %37 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 1, i8* @FH_TUERMODUL__BLOCK_copy, align 1, !tbaa !5 + store i8 1, i8* @BEWEGUNG_BLOCK_ERKENNUNG_CTRL_next_state, align 1, !tbaa !5 + br label %52 + +43: ; preds = %34 + store i8 0, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 0), align 16, !tbaa !5 + %44 = load i32, i32* @BLOCK_ERKENNUNG_CTRL__N, align 4, !tbaa !10 + %45 = icmp ne i32 %44, 11 + %46 = load i32, i32* @BLOCK_ERKENNUNG_CTRL__N_old, align 4 + %47 = icmp eq i32 %46, 11 + %48 = select i1 %45, i1 true, i1 %47 + br i1 %48, label %52, label %49 + +49: ; preds = %43 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 2, i8* @BEWEGUNG_BLOCK_ERKENNUNG_CTRL_next_state, align 1, !tbaa !5 + br label %52 + +50: ; preds = %34 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i32 0, i32* @BLOCK_ERKENNUNG_CTRL__N, align 4, !tbaa !10 + store i32 2, i32* @BLOCK_ERKENNUNG_CTRL__I_EIN_MAX, align 4, !tbaa !10 + store i8 3, i8* @BEWEGUNG_BLOCK_ERKENNUNG_CTRL_next_state, align 1, !tbaa !5 + store i8 1, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 0), align 16, !tbaa !5 + br label %52 + +51: ; preds = %11 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 1, i8* @BLOCK_ERKENNUNG_CTRL_BLOCK_ERKENNUNG_CTRL_next_state, align 1, !tbaa !5 + br label %52 + +52: ; preds = %43, %20, %33, %51, %14, %37, %34, %50, %49, %42, %10 + ret void +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local void @FH_DU() local_unnamed_addr #2 { + store i64 1, i64* @time, align 8, !tbaa !8 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 0, i8* @step, align 1, !tbaa !5 + br label %1 + +1: ; preds = %0, %134 + store i8 1, i8* @stable, align 1, !tbaa !5 + %2 = load i8, i8* @step, align 1, !tbaa !5 + %3 = add i8 %2, 1 + store i8 %3, i8* @step, align 1, !tbaa !5 + %4 = load i8, i8* @FH_STEUERUNG_DUMMY_FH_STEUERUNG_DUMMY_next_state, align 1, !tbaa !5 + %5 = sext i8 %4 to i32 + switch i32 %5, label %30 [ + i32 1, label %6 + i32 2, label %12 + i32 3, label %24 + ] + +6: ; preds = %1 + %7 = load i8, i8* @FH_DU__MFHZ, align 1, !tbaa !5 + %8 = icmp ne i8 %7, 0 + %9 = load i8, i8* @FH_DU__MFHZ_old, align 1 + %10 = icmp eq i8 %9, 0 + %11 = select i1 %8, i1 true, i1 %10 + br i1 %11, label %33, label %30 + +12: ; preds = %1 + %13 = load i8, i8* @FH_DU__MFHZ, align 1, !tbaa !5 + %14 = icmp eq i8 %13, 0 + %15 = load i8, i8* @FH_DU__MFHZ_old, align 1 + %16 = icmp ne i8 %15, 0 + %17 = select i1 %14, i1 true, i1 %16 + br i1 %17, label %18, label %30 + +18: ; preds = %12 + %19 = load i8, i8* @FH_DU__MFHA, align 1, !tbaa !5 + %20 = icmp eq i8 %19, 0 + %21 = load i8, i8* @FH_DU__MFHA_old, align 1 + %22 = icmp ne i8 %21, 0 + %23 = select i1 %20, i1 true, i1 %22 + br i1 %23, label %33, label %30 + +24: ; preds = %1 + %25 = load i8, i8* @FH_DU__MFHA, align 1, !tbaa !5 + %26 = icmp ne i8 %25, 0 + %27 = load i8, i8* @FH_DU__MFHA_old, align 1 + %28 = icmp eq i8 %27, 0 + %29 = select i1 %26, i1 true, i1 %28 + br i1 %29, label %33, label %30 + +30: ; preds = %1, %24, %18, %12, %6 + %31 = phi i32 [ 0, %6 ], [ -100, %12 ], [ 100, %18 ], [ 0, %24 ], [ 0, %1 ] + %32 = phi i8 [ 2, %6 ], [ 1, %12 ], [ 3, %18 ], [ 2, %24 ], [ 2, %1 ] + store i8 0, i8* @stable, align 1, !tbaa !5 + store i32 %31, 
i32* @FH_DU__MFH, align 4, !tbaa !10 + store i8 %32, i8* @FH_STEUERUNG_DUMMY_FH_STEUERUNG_DUMMY_next_state, align 1, !tbaa !5 + br label %33 + +33: ; preds = %30, %24, %18, %6 + %34 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 10), align 2, !tbaa !5 + %35 = icmp eq i8 %34, 0 + br i1 %35, label %36, label %37 + +36: ; preds = %33 + store i8 3, i8* @KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + br label %37 + +37: ; preds = %36, %33 + store i8 0, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 11), align 1, !tbaa !5 + %38 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 16), align 16, !tbaa !5 + %39 = icmp eq i8 %38, 0 + br i1 %39, label %40, label %41 + +40: ; preds = %37 + store i8 1, i8* @EINKLEMMSCHUTZ_CTRL_EINKLEMMSCHUTZ_CTRL_next_state, align 1, !tbaa !5 + br label %41 + +41: ; preds = %40, %37 + store i8 0, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 17), align 1, !tbaa !5 + %42 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 19), align 1, !tbaa !5 + %43 = icmp eq i8 %42, 0 + br i1 %43, label %44, label %45 + +44: ; preds = %41 + store i8 0, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 0), align 16, !tbaa !5 + store i8 1, i8* @BLOCK_ERKENNUNG_CTRL_BLOCK_ERKENNUNG_CTRL_next_state, align 1, !tbaa !5 + br label %45 + +45: ; preds = %44, %41 + store i8 0, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 20), align 4, !tbaa !5 + %46 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 13), align 1, !tbaa !5 + %47 = icmp eq i8 %46, 0 + br i1 %47, label %48, label %49 + +48: ; preds = %45 + store i8 0, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 4), align 4, !tbaa !5 + store i8 0, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 6), align 2, !tbaa !5 + store i8 2, i8* @B_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i32 0, i32* @FH_TUERMODUL_CTRL__N, align 4, !tbaa !10 + store i8 1, i8* @A_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 1, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 5), align 1, !tbaa !5 + store i8 1, i8* @WIEDERHOLSPERRE_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + br label %49 + +49: ; preds = %48, %45 + store i8 0, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 14), align 2, !tbaa !5 + store i8 1, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 11), align 1, !tbaa !5 + store i8 1, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 17), align 1, !tbaa !5 + store i8 1, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 20), align 4, !tbaa !5 + store i8 1, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 14), align 2, !tbaa !5 + %50 = load i8, i8* @FH_DU__S_FH_TMBFZUCAN, align 1, !tbaa !5 + %51 = load i8, i8* @FH_DU__S_FH_TMBFZUCAN_old, align 1, !tbaa !5 + %52 = icmp eq i8 %50, %51 + %53 = load i8, i8* @FH_DU__DOOR_ID, align 1 + %54 = icmp ne i8 %53, 0 + %55 = select i1 %52, i1 true, i1 %54 + br i1 %55, label %57, label %56 + +56: ; preds = %49 + store i8 %50, i8* @FH_DU__S_FH_FTZU, align 1, !tbaa !5 + br label %57 + +57: ; preds = %56, %49 + %58 = load i8, i8* @FH_DU__S_FH_TMBFZUDISC, align 1, !tbaa !5 + %59 = load i8, i8* @FH_DU__S_FH_TMBFZUDISC_old, align 1, !tbaa !5 + %60 = icmp ne i8 %58, %59 + %61 = select 
i1 %60, i1 %54, i1 false + br i1 %61, label %62, label %63 + +62: ; preds = %57 + store i8 %58, i8* @FH_DU__S_FH_TMBFZUCAN, align 1, !tbaa !5 + br label %63 + +63: ; preds = %62, %57 + %64 = load i8, i8* @FH_DU__S_FH_TMBFAUFCAN, align 1, !tbaa !5 + %65 = load i8, i8* @FH_DU__S_FH_TMBFAUFCAN_old, align 1, !tbaa !5 + %66 = icmp eq i8 %64, %65 + %67 = select i1 %66, i1 true, i1 %54 + br i1 %67, label %69, label %68 + +68: ; preds = %63 + store i8 %64, i8* @FH_DU__S_FH_FTAUF, align 1, !tbaa !5 + br label %69 + +69: ; preds = %68, %63 + %70 = load i8, i8* @FH_DU__S_FH_TMBFAUFDISC, align 1, !tbaa !5 + %71 = load i8, i8* @FH_DU__S_FH_TMBFAUFDISC_old, align 1, !tbaa !5 + %72 = icmp ne i8 %70, %71 + %73 = select i1 %72, i1 %54, i1 false + br i1 %73, label %74, label %75 + +74: ; preds = %69 + store i8 %70, i8* @FH_DU__S_FH_TMBFAUFCAN, align 1, !tbaa !5 + br label %75 + +75: ; preds = %74, %69 + %76 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 12), align 4, !tbaa !5 + store i8 %76, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 10), align 2, !tbaa !5 + %77 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 15), align 1, !tbaa !5 + store i8 %77, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 13), align 1, !tbaa !5 + %78 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 18), align 2, !tbaa !5 + store i8 %78, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 16), align 16, !tbaa !5 + %79 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 21), align 1, !tbaa !5 + store i8 %79, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 19), align 1, !tbaa !5 + %80 = load i8, i8* @FH_DU__S_FH_AUFDISC, align 1, !tbaa !5 + store i8 %80, i8* @FH_TUERMODUL__SFHA_MEC, align 1, !tbaa !5 + %81 = load i8, i8* @FH_DU__S_FH_FTAUF, align 1, !tbaa !5 + store i8 %81, i8* @FH_TUERMODUL__SFHA_ZENTRAL, align 1, !tbaa !5 + %82 = load i8, i8* @FH_DU__S_FH_ZUDISC, align 1, !tbaa !5 + store i8 %82, i8* @FH_TUERMODUL__SFHZ_MEC, align 1, !tbaa !5 + %83 = load i8, i8* @FH_DU__S_FH_FTZU, align 1, !tbaa !5 + store i8 %83, i8* @FH_TUERMODUL__SFHZ_ZENTRAL, align 1, !tbaa !5 + call void @generic_KINDERSICHERUNG_CTRL() + %84 = load i8, i8* @FH_TUERMODUL__MFHA, align 1, !tbaa !5 + store i8 %84, i8* @FH_DU__MFHA, align 1, !tbaa !5 + %85 = load i8, i8* @FH_TUERMODUL__MFHZ, align 1, !tbaa !5 + store i8 %85, i8* @FH_DU__MFHZ, align 1, !tbaa !5 + %86 = load i32, i32* @FH_TUERMODUL__I_EIN, align 4, !tbaa !10 + store i32 %86, i32* @FH_DU__I_EIN, align 4, !tbaa !10 + %87 = load i8, i8* @FH_TUERMODUL__EKS_LEISTE_AKTIV, align 1, !tbaa !5 + store i8 %87, i8* @FH_DU__EKS_LEISTE_AKTIV, align 1, !tbaa !5 + %88 = load i32, i32* @FH_TUERMODUL__POSITION, align 4, !tbaa !10 + store i32 %88, i32* @FH_DU__POSITION, align 4, !tbaa !10 + %89 = load i8, i8* @FH_TUERMODUL__FT, align 1, !tbaa !5 + store i8 %89, i8* @FH_DU__FT, align 1, !tbaa !5 + %90 = load i8, i8* @FH_TUERMODUL__SFHA_MEC, align 1, !tbaa !5 + store i8 %90, i8* @FH_DU__S_FH_AUFDISC, align 1, !tbaa !5 + %91 = load i8, i8* @FH_TUERMODUL__SFHA_ZENTRAL, align 1, !tbaa !5 + store i8 %91, i8* @FH_DU__S_FH_FTAUF, align 1, !tbaa !5 + %92 = load i8, i8* @FH_TUERMODUL__SFHZ_MEC, align 1, !tbaa !5 + store i8 %92, i8* @FH_DU__S_FH_ZUDISC, align 1, !tbaa !5 + %93 = load i8, i8* @FH_TUERMODUL__SFHZ_ZENTRAL, align 1, !tbaa !5 + store i8 %93, i8* @FH_DU__S_FH_FTZU, align 1, !tbaa !5 + %94 = load i8, 
i8* @FH_TUERMODUL__KL_50, align 1, !tbaa !5 + store i8 %94, i8* @FH_DU__KL_50, align 1, !tbaa !5 + %95 = load i8, i8* @FH_TUERMODUL__BLOCK, align 1, !tbaa !5 + store i8 %95, i8* @FH_DU__BLOCK, align 1, !tbaa !5 + call void @generic_FH_TUERMODUL_CTRL() + %96 = load i8, i8* @FH_TUERMODUL__MFHA, align 1, !tbaa !5 + store i8 %96, i8* @FH_DU__MFHA, align 1, !tbaa !5 + %97 = load i8, i8* @FH_TUERMODUL__MFHZ, align 1, !tbaa !5 + store i8 %97, i8* @FH_DU__MFHZ, align 1, !tbaa !5 + %98 = load i32, i32* @FH_TUERMODUL__I_EIN, align 4, !tbaa !10 + store i32 %98, i32* @FH_DU__I_EIN, align 4, !tbaa !10 + %99 = load i8, i8* @FH_TUERMODUL__EKS_LEISTE_AKTIV, align 1, !tbaa !5 + store i8 %99, i8* @FH_DU__EKS_LEISTE_AKTIV, align 1, !tbaa !5 + %100 = load i32, i32* @FH_TUERMODUL__POSITION, align 4, !tbaa !10 + store i32 %100, i32* @FH_DU__POSITION, align 4, !tbaa !10 + %101 = load i8, i8* @FH_TUERMODUL__FT, align 1, !tbaa !5 + store i8 %101, i8* @FH_DU__FT, align 1, !tbaa !5 + %102 = load i8, i8* @FH_TUERMODUL__SFHA_MEC, align 1, !tbaa !5 + store i8 %102, i8* @FH_DU__S_FH_AUFDISC, align 1, !tbaa !5 + %103 = load i8, i8* @FH_TUERMODUL__SFHA_ZENTRAL, align 1, !tbaa !5 + store i8 %103, i8* @FH_DU__S_FH_FTAUF, align 1, !tbaa !5 + %104 = load i8, i8* @FH_TUERMODUL__SFHZ_MEC, align 1, !tbaa !5 + store i8 %104, i8* @FH_DU__S_FH_ZUDISC, align 1, !tbaa !5 + %105 = load i8, i8* @FH_TUERMODUL__SFHZ_ZENTRAL, align 1, !tbaa !5 + store i8 %105, i8* @FH_DU__S_FH_FTZU, align 1, !tbaa !5 + %106 = load i8, i8* @FH_TUERMODUL__KL_50, align 1, !tbaa !5 + store i8 %106, i8* @FH_DU__KL_50, align 1, !tbaa !5 + %107 = load i8, i8* @FH_TUERMODUL__BLOCK, align 1, !tbaa !5 + store i8 %107, i8* @FH_DU__BLOCK, align 1, !tbaa !5 + %108 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 16), align 16, !tbaa !5 + %109 = icmp eq i8 %108, 0 + br i1 %109, label %134, label %110 + +110: ; preds = %75 + %111 = load i8, i8* @EINKLEMMSCHUTZ_CTRL_EINKLEMMSCHUTZ_CTRL_next_state, align 1, !tbaa !5 + %112 = sext i8 %111 to i32 + switch i32 %112, label %131 [ + i32 1, label %113 + i32 2, label %125 + ] + +113: ; preds = %110 + %114 = icmp eq i8 %99, 0 + %115 = load i8, i8* @FH_TUERMODUL__EKS_LEISTE_AKTIV_old, align 1 + %116 = icmp ne i8 %115, 0 + %117 = select i1 %114, i1 true, i1 %116 + br i1 %117, label %134, label %118 + +118: ; preds = %113 + %119 = load i8, i8* @FH_TUERMODUL__SFHZ, align 1, !tbaa !5 + %120 = icmp eq i8 %119, 0 + %121 = load i8, i8* @FH_TUERMODUL__SFHA, align 1 + %122 = icmp eq i8 %121, 0 + %123 = select i1 %120, i1 true, i1 %122 + br i1 %123, label %124, label %134 + +124: ; preds = %118 + store i8 0, i8* @stable, align 1, !tbaa !5 + store i8 1, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 24), align 8, !tbaa !5 + br label %132 + +125: ; preds = %110 + store i8 0, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 24), align 8, !tbaa !5 + %126 = icmp ne i8 %99, 0 + %127 = load i8, i8* @FH_TUERMODUL__EKS_LEISTE_AKTIV_old, align 1 + %128 = icmp eq i8 %127, 0 + %129 = select i1 %126, i1 true, i1 %128 + br i1 %129, label %134, label %130 + +130: ; preds = %125 + store i8 0, i8* @stable, align 1, !tbaa !5 + br label %132 + +131: ; preds = %110 + store i8 0, i8* @stable, align 1, !tbaa !5 + br label %132 + +132: ; preds = %131, %130, %124 + %133 = phi i8 [ 2, %124 ], [ 1, %130 ], [ 1, %131 ] + store i8 %133, i8* @EINKLEMMSCHUTZ_CTRL_EINKLEMMSCHUTZ_CTRL_next_state, align 1, !tbaa !5 + br label %134 + +134: ; preds = %132, %75, %113, %118, %125 + store i8 %96, i8* 
@FH_DU__MFHA, align 1, !tbaa !5 + store i8 %97, i8* @FH_DU__MFHZ, align 1, !tbaa !5 + store i32 %98, i32* @FH_DU__I_EIN, align 4, !tbaa !10 + store i8 %99, i8* @FH_DU__EKS_LEISTE_AKTIV, align 1, !tbaa !5 + store i32 %100, i32* @FH_DU__POSITION, align 4, !tbaa !10 + store i8 %101, i8* @FH_DU__FT, align 1, !tbaa !5 + store i8 %102, i8* @FH_DU__S_FH_AUFDISC, align 1, !tbaa !5 + store i8 %103, i8* @FH_DU__S_FH_FTAUF, align 1, !tbaa !5 + store i8 %104, i8* @FH_DU__S_FH_ZUDISC, align 1, !tbaa !5 + store i8 %105, i8* @FH_DU__S_FH_FTZU, align 1, !tbaa !5 + store i8 %106, i8* @FH_DU__KL_50, align 1, !tbaa !5 + store i8 %107, i8* @FH_DU__BLOCK, align 1, !tbaa !5 + call void @generic_BLOCK_ERKENNUNG_CTRL() + %135 = load i8, i8* @FH_TUERMODUL__MFHA, align 1, !tbaa !5 + store i8 %135, i8* @FH_DU__MFHA, align 1, !tbaa !5 + %136 = load i8, i8* @FH_TUERMODUL__MFHZ, align 1, !tbaa !5 + store i8 %136, i8* @FH_DU__MFHZ, align 1, !tbaa !5 + %137 = load i32, i32* @FH_TUERMODUL__I_EIN, align 4, !tbaa !10 + store i32 %137, i32* @FH_DU__I_EIN, align 4, !tbaa !10 + %138 = load i8, i8* @FH_TUERMODUL__EKS_LEISTE_AKTIV, align 1, !tbaa !5 + store i8 %138, i8* @FH_DU__EKS_LEISTE_AKTIV, align 1, !tbaa !5 + %139 = load i32, i32* @FH_TUERMODUL__POSITION, align 4, !tbaa !10 + store i32 %139, i32* @FH_DU__POSITION, align 4, !tbaa !10 + %140 = load i8, i8* @FH_TUERMODUL__FT, align 1, !tbaa !5 + store i8 %140, i8* @FH_DU__FT, align 1, !tbaa !5 + %141 = load i8, i8* @FH_TUERMODUL__SFHA_MEC, align 1, !tbaa !5 + store i8 %141, i8* @FH_DU__S_FH_AUFDISC, align 1, !tbaa !5 + %142 = load i8, i8* @FH_TUERMODUL__SFHA_ZENTRAL, align 1, !tbaa !5 + store i8 %142, i8* @FH_DU__S_FH_FTAUF, align 1, !tbaa !5 + %143 = load i8, i8* @FH_TUERMODUL__SFHZ_MEC, align 1, !tbaa !5 + store i8 %143, i8* @FH_DU__S_FH_ZUDISC, align 1, !tbaa !5 + %144 = load i8, i8* @FH_TUERMODUL__SFHZ_ZENTRAL, align 1, !tbaa !5 + store i8 %144, i8* @FH_DU__S_FH_FTZU, align 1, !tbaa !5 + %145 = load i8, i8* @FH_TUERMODUL__KL_50, align 1, !tbaa !5 + store i8 %145, i8* @FH_DU__KL_50, align 1, !tbaa !5 + %146 = load i8, i8* @FH_TUERMODUL__BLOCK, align 1, !tbaa !5 + store i8 %146, i8* @FH_DU__BLOCK, align 1, !tbaa !5 + %147 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 10), align 2, !tbaa !5 + store i8 %147, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 11), align 1, !tbaa !5 + %148 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 13), align 1, !tbaa !5 + store i8 %148, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 14), align 2, !tbaa !5 + %149 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 16), align 16, !tbaa !5 + store i8 %149, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 17), align 1, !tbaa !5 + %150 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 19), align 1, !tbaa !5 + store i8 %150, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @Bitlist, i64 0, i64 20), align 4, !tbaa !5 + %151 = load i32, i32* @FH_TUERMODUL_CTRL__N, align 4, !tbaa !10 + store i32 %151, i32* @FH_TUERMODUL_CTRL__N_old, align 4, !tbaa !10 + store i32 %137, i32* @FH_TUERMODUL__I_EIN_old, align 4, !tbaa !10 + %152 = load i32, i32* @FH_DU__MFH_copy, align 4, !tbaa !10 + store i32 %152, i32* @FH_DU__MFH, align 4, !tbaa !10 + store i32 %137, i32* @FH_DU__I_EIN_old, align 4, !tbaa !10 + %153 = load i32, i32* @BLOCK_ERKENNUNG_CTRL__N, align 4, !tbaa !10 + store i32 %153, i32* 
@BLOCK_ERKENNUNG_CTRL__N_old, align 4, !tbaa !10 + store i8 %144, i8* @FH_TUERMODUL__SFHZ_ZENTRAL_old, align 1, !tbaa !5 + store i8 %143, i8* @FH_TUERMODUL__SFHZ_MEC_old, align 1, !tbaa !5 + store i8 %142, i8* @FH_TUERMODUL__SFHA_ZENTRAL_old, align 1, !tbaa !5 + store i8 %141, i8* @FH_TUERMODUL__SFHA_MEC_old, align 1, !tbaa !5 + %154 = load i8, i8* @FH_TUERMODUL__BLOCK_copy, align 1, !tbaa !5 + store i8 %154, i8* @FH_TUERMODUL__BLOCK, align 1, !tbaa !5 + store i8 %154, i8* @FH_TUERMODUL__BLOCK_old, align 1, !tbaa !5 + %155 = load i8, i8* @FH_TUERMODUL__SFHZ_copy, align 1, !tbaa !5 + store i8 %155, i8* @FH_TUERMODUL__SFHZ, align 1, !tbaa !5 + store i8 %155, i8* @FH_TUERMODUL__SFHZ_old, align 1, !tbaa !5 + %156 = load i8, i8* @FH_TUERMODUL__SFHA_copy, align 1, !tbaa !5 + store i8 %156, i8* @FH_TUERMODUL__SFHA, align 1, !tbaa !5 + store i8 %156, i8* @FH_TUERMODUL__SFHA_old, align 1, !tbaa !5 + %157 = load i8, i8* @FH_TUERMODUL__MFHZ_copy, align 1, !tbaa !5 + store i8 %157, i8* @FH_TUERMODUL__MFHZ, align 1, !tbaa !5 + store i8 %157, i8* @FH_TUERMODUL__MFHZ_old, align 1, !tbaa !5 + %158 = load i8, i8* @FH_TUERMODUL__MFHA_copy, align 1, !tbaa !5 + store i8 %158, i8* @FH_TUERMODUL__MFHA, align 1, !tbaa !5 + store i8 %158, i8* @FH_TUERMODUL__MFHA_old, align 1, !tbaa !5 + store i8 %138, i8* @FH_TUERMODUL__EKS_LEISTE_AKTIV_old, align 1, !tbaa !5 + store i8 %138, i8* @FH_DU__EKS_LEISTE_AKTIV_old, align 1, !tbaa !5 + %159 = load i8, i8* @FH_DU__S_FH_TMBFAUFCAN, align 1, !tbaa !5 + store i8 %159, i8* @FH_DU__S_FH_TMBFAUFCAN_old, align 1, !tbaa !5 + %160 = load i8, i8* @FH_DU__S_FH_TMBFZUCAN, align 1, !tbaa !5 + store i8 %160, i8* @FH_DU__S_FH_TMBFZUCAN_old, align 1, !tbaa !5 + %161 = load i8, i8* @FH_DU__S_FH_TMBFZUDISC, align 1, !tbaa !5 + store i8 %161, i8* @FH_DU__S_FH_TMBFZUDISC_old, align 1, !tbaa !5 + %162 = load i8, i8* @FH_DU__S_FH_TMBFAUFDISC, align 1, !tbaa !5 + store i8 %162, i8* @FH_DU__S_FH_TMBFAUFDISC_old, align 1, !tbaa !5 + %163 = load i8, i8* @FH_DU__BLOCK_copy, align 1, !tbaa !5 + store i8 %163, i8* @FH_DU__BLOCK, align 1, !tbaa !5 + store i8 %163, i8* @FH_DU__BLOCK_old, align 1, !tbaa !5 + %164 = load i8, i8* @FH_DU__MFHZ_copy, align 1, !tbaa !5 + store i8 %164, i8* @FH_DU__MFHZ, align 1, !tbaa !5 + store i8 %164, i8* @FH_DU__MFHZ_old, align 1, !tbaa !5 + %165 = load i8, i8* @FH_DU__MFHA_copy, align 1, !tbaa !5 + store i8 %165, i8* @FH_DU__MFHA, align 1, !tbaa !5 + store i8 %165, i8* @FH_DU__MFHA_old, align 1, !tbaa !5 + %166 = load i8, i8* @stable, align 1, !tbaa !5 + %167 = icmp eq i8 %166, 0 + br i1 %167, label %1, label %168, !llvm.loop !12 + +168: ; preds = %134 + ret void +} + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local i32 @main() local_unnamed_addr #2 { + store i64 0, i64* @tm_entered_EINSCHALTSTROM_MESSEN_BLOCK_ERKENNUNG_CTRLch_BLOCK_ERKENNUNG_CTRL__N_copy, align 8, !tbaa !8 + store i64 0, i64* @tm_entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRLexited_BEREIT_FH_TUERMODUL_CTRL, align 8, !tbaa !8 + store i64 0, i64* @tm_entered_WIEDERHOLSPERRE_FH_TUERMODUL_CTRL, align 8, !tbaa !8 + store i8 0, i8* @NICHT_INITIALISIERT_NICHT_INITIALISIERT_next_state, align 1, !tbaa !5 + store i8 0, i8* @ZENTRAL_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @MEC_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @KINDERSICHERUNG_CTRL_KINDERSICHERUNG_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @B_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @A_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa 
!5 + store i8 0, i8* @WIEDERHOLSPERRE_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @INITIALISIERT_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @TIPP_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @MANUELL_SCHLIESSEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @OEFFNEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @SCHLIESSEN_FH_TUERMODUL_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @FH_STEUERUNG_DUMMY_FH_STEUERUNG_DUMMY_next_state, align 1, !tbaa !5 + store i8 0, i8* @EINKLEMMSCHUTZ_CTRL_EINKLEMMSCHUTZ_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @BEWEGUNG_BLOCK_ERKENNUNG_CTRL_next_state, align 1, !tbaa !5 + store i8 0, i8* @BLOCK_ERKENNUNG_CTRL_BLOCK_ERKENNUNG_CTRL_next_state, align 1, !tbaa !5 + call void @interface() + call void @FH_DU() + ret i32 0 +} + +attributes #0 = { mustprogress nofree norecurse nosync nounwind sspstrong uwtable willreturn "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #1 = { mustprogress nofree norecurse nosync nounwind sspstrong uwtable willreturn writeonly "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #2 = { nofree norecurse nosync nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = !{!6, !6, i64 0} +!6 = !{!"omnipotent char", !7, i64 0} +!7 = !{!"Simple C/C++ TBAA"} +!8 = !{!9, !9, i64 0} +!9 = !{!"long", !6, i64 0} +!10 = !{!11, !11, i64 0} +!11 = !{!"int", !6, i64 0} +!12 = distinct !{!12, !13, !14} +!13 = !{!"llvm.loop.mustprogress"} +!14 = !{!"llvm.loop.unroll.disable"} diff --git a/test/ud.ll b/test/ud.ll new file mode 100644 index 0000000..700d4dd --- /dev/null +++ b/test/ud.ll @@ -0,0 +1,253 @@ +; ModuleID = 'ud.c' +source_filename = "ud.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +@a = dso_local local_unnamed_addr global [50 x [50 x i64]] zeroinitializer, align 16 +@b = dso_local local_unnamed_addr global [50 x i64] zeroinitializer, align 16 +@x = dso_local local_unnamed_addr global [50 x i64] zeroinitializer, align 16 + +; Function Attrs: nofree nosync nounwind sspstrong uwtable +define dso_local void @main() local_unnamed_addr #0 { + br label %1 + +1: ; preds = %0, %16 + %2 = phi i64 [ 0, %0 ], [ %3, %16 ] + %3 = add nuw nsw i64 %2, 1 + br label %4 + +4: ; preds = %1, %4 + %5 = phi i64 [ 0, %1 ], [ %7, %4 ] + %6 = phi i64 [ 0, %1 ], [ %14, %4 ] + %7 = add nuw nsw i64 %5, 1 + %8 = add nuw nsw i64 %3, %7 + %9 = getelementptr inbounds [50 x [50 x i64]], [50 x [50 x i64]]* @a, i64 0, i64 %2, i64 %5 + %10 = icmp eq i64 %2, %5 + %11 = shl nuw i64 %8, 1 + %12 = and i64 %11, 8589934590 + %13 = select i1 %10, i64 %12, i64 %8 + store i64 %13, i64* %9, align 8, !tbaa !5 + %14 
= add nuw nsw i64 %13, %6 + %15 = icmp eq i64 %7, 6 + br i1 %15, label %16, label %4, !llvm.loop !9 + +16: ; preds = %4 + %17 = getelementptr inbounds [50 x i64], [50 x i64]* @b, i64 0, i64 %2 + store i64 %14, i64* %17, align 8, !tbaa !5 + %18 = icmp eq i64 %3, 6 + br i1 %18, label %19, label %1, !llvm.loop !12 + +19: ; preds = %16 + %20 = call i32 @ludcmp(i32 undef, i32 5) + ret void +} + +; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn +declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) #1 + +; Function Attrs: nofree nosync nounwind sspstrong uwtable +define dso_local i32 @ludcmp(i32 %0, i32 %1) local_unnamed_addr #0 { + %3 = alloca [100 x i64], align 16 + %4 = bitcast [100 x i64]* %3 to i8* + call void @llvm.lifetime.start.p0i8(i64 800, i8* nonnull %4) #2 + %5 = icmp sgt i32 %1, 0 + br i1 %5, label %6, label %59 + +6: ; preds = %2 + %7 = add i32 %1, 1 + %8 = zext i32 %1 to i64 + %9 = zext i32 %7 to i64 + %10 = zext i32 %7 to i64 + br label %14 + +11: ; preds = %56 + %12 = add nuw nsw i64 %16, 1 + %13 = icmp eq i64 %17, %8 + br i1 %13, label %59, label %14, !llvm.loop !13 + +14: ; preds = %6, %11 + %15 = phi i64 [ 0, %6 ], [ %17, %11 ] + %16 = phi i64 [ 1, %6 ], [ %12, %11 ] + %17 = add nuw nsw i64 %15, 1 + %18 = icmp eq i64 %15, 0 + %19 = getelementptr inbounds [50 x [50 x i64]], [50 x [50 x i64]]* @a, i64 0, i64 %15, i64 %15 + br label %20 + +20: ; preds = %14, %35 + %21 = phi i64 [ %16, %14 ], [ %39, %35 ] + %22 = getelementptr inbounds [50 x [50 x i64]], [50 x [50 x i64]]* @a, i64 0, i64 %21, i64 %15 + %23 = load i64, i64* %22, align 8, !tbaa !5 + br i1 %18, label %35, label %24 + +24: ; preds = %20, %24 + %25 = phi i64 [ %33, %24 ], [ 0, %20 ] + %26 = phi i64 [ %32, %24 ], [ %23, %20 ] + %27 = getelementptr inbounds [50 x [50 x i64]], [50 x [50 x i64]]* @a, i64 0, i64 %21, i64 %25 + %28 = load i64, i64* %27, align 8, !tbaa !5 + %29 = getelementptr inbounds [50 x [50 x i64]], [50 x [50 x i64]]* @a, i64 0, i64 %25, i64 %15 + %30 = load i64, i64* %29, align 8, !tbaa !5 + %31 = mul nsw i64 %30, %28 + %32 = sub nsw i64 %26, %31 + %33 = add nuw nsw i64 %25, 1 + %34 = icmp eq i64 %33, %15 + br i1 %34, label %35, label %24, !llvm.loop !14 + +35: ; preds = %24, %20 + %36 = phi i64 [ %23, %20 ], [ %32, %24 ] + %37 = load i64, i64* %19, align 8, !tbaa !5 + %38 = sdiv i64 %36, %37 + store i64 %38, i64* %22, align 8, !tbaa !5 + %39 = add nuw nsw i64 %21, 1 + %40 = icmp eq i64 %39, %9 + br i1 %40, label %41, label %20, !llvm.loop !15 + +41: ; preds = %35, %56 + %42 = phi i64 [ %57, %56 ], [ %16, %35 ] + %43 = getelementptr inbounds [50 x [50 x i64]], [50 x [50 x i64]]* @a, i64 0, i64 %17, i64 %42 + %44 = load i64, i64* %43, align 8, !tbaa !5 + br label %45 + +45: ; preds = %41, %45 + %46 = phi i64 [ 0, %41 ], [ %54, %45 ] + %47 = phi i64 [ %44, %41 ], [ %53, %45 ] + %48 = getelementptr inbounds [50 x [50 x i64]], [50 x [50 x i64]]* @a, i64 0, i64 %17, i64 %46 + %49 = load i64, i64* %48, align 8, !tbaa !5 + %50 = getelementptr inbounds [50 x [50 x i64]], [50 x [50 x i64]]* @a, i64 0, i64 %46, i64 %42 + %51 = load i64, i64* %50, align 8, !tbaa !5 + %52 = mul nsw i64 %51, %49 + %53 = sub nsw i64 %47, %52 + %54 = add nuw nsw i64 %46, 1 + %55 = icmp eq i64 %54, %16 + br i1 %55, label %56, label %45, !llvm.loop !16 + +56: ; preds = %45 + store i64 %53, i64* %43, align 8, !tbaa !5 + %57 = add nuw nsw i64 %42, 1 + %58 = icmp eq i64 %57, %10 + br i1 %58, label %11, label %41, !llvm.loop !17 + +59: ; preds = %11, %2 + %60 = load i64, i64* getelementptr inbounds 
([50 x i64], [50 x i64]* @b, i64 0, i64 0), align 16, !tbaa !5 + %61 = getelementptr inbounds [100 x i64], [100 x i64]* %3, i64 0, i64 0 + store i64 %60, i64* %61, align 16, !tbaa !5 + %62 = icmp slt i32 %1, 1 + br i1 %62, label %85, label %63 + +63: ; preds = %59 + %64 = add i32 %1, 1 + %65 = zext i32 %64 to i64 + br label %66 + +66: ; preds = %63, %81 + %67 = phi i64 [ 1, %63 ], [ %83, %81 ] + %68 = getelementptr inbounds [50 x i64], [50 x i64]* @b, i64 0, i64 %67 + %69 = load i64, i64* %68, align 8, !tbaa !5 + br label %70 + +70: ; preds = %66, %70 + %71 = phi i64 [ 0, %66 ], [ %79, %70 ] + %72 = phi i64 [ %69, %66 ], [ %78, %70 ] + %73 = getelementptr inbounds [50 x [50 x i64]], [50 x [50 x i64]]* @a, i64 0, i64 %67, i64 %71 + %74 = load i64, i64* %73, align 8, !tbaa !5 + %75 = getelementptr inbounds [100 x i64], [100 x i64]* %3, i64 0, i64 %71 + %76 = load i64, i64* %75, align 8, !tbaa !5 + %77 = mul nsw i64 %76, %74 + %78 = sub nsw i64 %72, %77 + %79 = add nuw nsw i64 %71, 1 + %80 = icmp eq i64 %79, %67 + br i1 %80, label %81, label %70, !llvm.loop !18 + +81: ; preds = %70 + %82 = getelementptr inbounds [100 x i64], [100 x i64]* %3, i64 0, i64 %67 + store i64 %78, i64* %82, align 8, !tbaa !5 + %83 = add nuw nsw i64 %67, 1 + %84 = icmp eq i64 %83, %65 + br i1 %84, label %85, label %66, !llvm.loop !19 + +85: ; preds = %81, %59 + %86 = sext i32 %1 to i64 + %87 = getelementptr inbounds [100 x i64], [100 x i64]* %3, i64 0, i64 %86 + %88 = load i64, i64* %87, align 8, !tbaa !5 + %89 = getelementptr inbounds [50 x [50 x i64]], [50 x [50 x i64]]* @a, i64 0, i64 %86, i64 %86 + %90 = load i64, i64* %89, align 8, !tbaa !5 + %91 = sdiv i64 %88, %90 + %92 = getelementptr inbounds [50 x i64], [50 x i64]* @x, i64 0, i64 %86 + store i64 %91, i64* %92, align 8, !tbaa !5 + %93 = icmp sgt i32 %1, 0 + br i1 %93, label %94, label %123 + +94: ; preds = %85 + %95 = sext i32 %1 to i64 + %96 = add i32 %1, 1 + %97 = sext i32 %1 to i64 + br label %98 + +98: ; preds = %94, %116 + %99 = phi i64 [ %95, %94 ], [ %100, %116 ] + %100 = add nsw i64 %99, -1 + %101 = getelementptr inbounds [100 x i64], [100 x i64]* %3, i64 0, i64 %100 + %102 = load i64, i64* %101, align 8, !tbaa !5 + %103 = icmp sgt i64 %99, %97 + br i1 %103, label %116, label %104 + +104: ; preds = %98, %104 + %105 = phi i64 [ %113, %104 ], [ %99, %98 ] + %106 = phi i64 [ %112, %104 ], [ %102, %98 ] + %107 = getelementptr inbounds [50 x [50 x i64]], [50 x [50 x i64]]* @a, i64 0, i64 %100, i64 %105 + %108 = load i64, i64* %107, align 8, !tbaa !5 + %109 = getelementptr inbounds [50 x i64], [50 x i64]* @x, i64 0, i64 %105 + %110 = load i64, i64* %109, align 8, !tbaa !5 + %111 = mul nsw i64 %110, %108 + %112 = sub nsw i64 %106, %111 + %113 = add nsw i64 %105, 1 + %114 = trunc i64 %113 to i32 + %115 = icmp eq i32 %96, %114 + br i1 %115, label %116, label %104, !llvm.loop !20 + +116: ; preds = %104, %98 + %117 = phi i64 [ %102, %98 ], [ %112, %104 ] + %118 = getelementptr inbounds [50 x [50 x i64]], [50 x [50 x i64]]* @a, i64 0, i64 %100, i64 %100 + %119 = load i64, i64* %118, align 8, !tbaa !5 + %120 = sdiv i64 %117, %119 + %121 = getelementptr inbounds [50 x i64], [50 x i64]* @x, i64 0, i64 %100 + store i64 %120, i64* %121, align 8, !tbaa !5 + %122 = icmp sgt i64 %99, 1 + br i1 %122, label %98, label %123, !llvm.loop !21 + +123: ; preds = %116, %85 + call void @llvm.lifetime.end.p0i8(i64 800, i8* nonnull %4) #2 + ret i32 0 +} + +; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn +declare void @llvm.lifetime.end.p0i8(i64 immarg, 
i8* nocapture) #1 + +attributes #0 = { nofree nosync nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #1 = { argmemonly mustprogress nofree nosync nounwind willreturn } +attributes #2 = { nounwind } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = !{!6, !6, i64 0} +!6 = !{!"long", !7, i64 0} +!7 = !{!"omnipotent char", !8, i64 0} +!8 = !{!"Simple C/C++ TBAA"} +!9 = distinct !{!9, !10, !11} +!10 = !{!"llvm.loop.mustprogress"} +!11 = !{!"llvm.loop.unroll.disable"} +!12 = distinct !{!12, !10, !11} +!13 = distinct !{!13, !10, !11} +!14 = distinct !{!14, !10, !11} +!15 = distinct !{!15, !10, !11} +!16 = distinct !{!16, !10, !11} +!17 = distinct !{!17, !10, !11} +!18 = distinct !{!18, !10, !11} +!19 = distinct !{!19, !10, !11} +!20 = distinct !{!20, !10, !11} +!21 = distinct !{!21, !10, !11} diff --git a/test/whet.ll b/test/whet.ll new file mode 100644 index 0000000..190c0ac --- /dev/null +++ b/test/whet.ll @@ -0,0 +1,452 @@ +; ModuleID = 'whet.c' +source_filename = "whet.c" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-linux-gnu" + +@t = dso_local local_unnamed_addr global double 0.000000e+00, align 8 +@t1 = dso_local local_unnamed_addr global double 0.000000e+00, align 8 +@t2 = dso_local local_unnamed_addr global double 0.000000e+00, align 8 +@n1 = dso_local local_unnamed_addr global i32 0, align 4 +@n2 = dso_local local_unnamed_addr global i32 0, align 4 +@n3 = dso_local local_unnamed_addr global i32 0, align 4 +@n4 = dso_local local_unnamed_addr global i32 0, align 4 +@n6 = dso_local local_unnamed_addr global i32 0, align 4 +@n7 = dso_local local_unnamed_addr global i32 0, align 4 +@n8 = dso_local local_unnamed_addr global i32 0, align 4 +@n9 = dso_local local_unnamed_addr global i32 0, align 4 +@n10 = dso_local local_unnamed_addr global i32 0, align 4 +@n11 = dso_local local_unnamed_addr global i32 0, align 4 +@x1 = dso_local local_unnamed_addr global double 0.000000e+00, align 8 +@x4 = dso_local local_unnamed_addr global double 0.000000e+00, align 8 +@x3 = dso_local local_unnamed_addr global double 0.000000e+00, align 8 +@x2 = dso_local local_unnamed_addr global double 0.000000e+00, align 8 +@i = dso_local local_unnamed_addr global i32 0, align 4 +@e1 = dso_local local_unnamed_addr global [4 x double] zeroinitializer, align 16 +@j = dso_local local_unnamed_addr global i32 0, align 4 +@k = dso_local local_unnamed_addr global i32 0, align 4 +@l = dso_local local_unnamed_addr global i32 0, align 4 +@y = dso_local local_unnamed_addr global double 0.000000e+00, align 8 +@x = dso_local local_unnamed_addr global double 0.000000e+00, align 8 +@z = dso_local local_unnamed_addr global double 0.000000e+00, align 8 + +; Function Attrs: nofree nounwind sspstrong uwtable +define dso_local i32 @main() local_unnamed_addr #0 { + store double 4.999750e-01, double* @t, align 8, !tbaa !5 + store double 5.002500e-01, double* @t1, align 8, !tbaa !5 + store double 2.000000e+00, double* @t2, align 8, !tbaa !5 + store i32 0, i32* @n1, align 4, !tbaa !9 + store i32 120, i32* @n2, align 4, !tbaa !9 + store i32 140, i32* @n3, align 4, !tbaa !9 + store 
i32 3450, i32* @n4, align 4, !tbaa !9 + store i32 2100, i32* @n6, align 4, !tbaa !9 + store i32 320, i32* @n7, align 4, !tbaa !9 + store i32 8990, i32* @n8, align 4, !tbaa !9 + store i32 6160, i32* @n9, align 4, !tbaa !9 + store i32 0, i32* @n10, align 4, !tbaa !9 + store i32 930, i32* @n11, align 4, !tbaa !9 + store double 1.000000e+00, double* @x1, align 8, !tbaa !5 + store double -1.000000e+00, double* @x4, align 8, !tbaa !5 + store double -1.000000e+00, double* @x3, align 8, !tbaa !5 + store double -1.000000e+00, double* @x2, align 8, !tbaa !5 + store i32 1, i32* @i, align 4, !tbaa !9 + store double 1.000000e+00, double* getelementptr inbounds ([4 x double], [4 x double]* @e1, i64 0, i64 0), align 16, !tbaa !5 + store double -1.000000e+00, double* getelementptr inbounds ([4 x double], [4 x double]* @e1, i64 0, i64 3), align 8, !tbaa !5 + store double -1.000000e+00, double* getelementptr inbounds ([4 x double], [4 x double]* @e1, i64 0, i64 2), align 16, !tbaa !5 + store double -1.000000e+00, double* getelementptr inbounds ([4 x double], [4 x double]* @e1, i64 0, i64 1), align 8, !tbaa !5 + br label %2 + +1: ; preds = %2 + store i32 121, i32* @i, align 4, !tbaa !9 + store double %11, double* getelementptr inbounds ([4 x double], [4 x double]* @e1, i64 0, i64 0), align 16, !tbaa !5 + store double %15, double* getelementptr inbounds ([4 x double], [4 x double]* @e1, i64 0, i64 1), align 8, !tbaa !5 + store double %19, double* getelementptr inbounds ([4 x double], [4 x double]* @e1, i64 0, i64 2), align 16, !tbaa !5 + store double %23, double* getelementptr inbounds ([4 x double], [4 x double]* @e1, i64 0, i64 3), align 8, !tbaa !5 + br label %26 + +2: ; preds = %0, %2 + %3 = phi i32 [ 1, %0 ], [ %24, %2 ] + %4 = phi double [ 1.000000e+00, %0 ], [ %11, %2 ] + %5 = phi double [ -1.000000e+00, %0 ], [ %15, %2 ] + %6 = phi double [ -1.000000e+00, %0 ], [ %19, %2 ] + %7 = phi double [ -1.000000e+00, %0 ], [ %23, %2 ] + %8 = fadd double %4, %5 + %9 = fadd double %8, %6 + %10 = fsub double %9, %7 + %11 = fmul double %10, 4.999750e-01 + %12 = fadd double %5, %11 + %13 = fsub double %12, %6 + %14 = fadd double %7, %13 + %15 = fmul double %14, 4.999750e-01 + %16 = fsub double %11, %15 + %17 = fadd double %6, %16 + %18 = fadd double %7, %17 + %19 = fmul double %18, 4.999750e-01 + %20 = fsub double %15, %11 + %21 = fadd double %20, %19 + %22 = fadd double %7, %21 + %23 = fmul double %22, 4.999750e-01 + %24 = add nuw nsw i32 %3, 1 + %25 = icmp eq i32 %24, 121 + br i1 %25, label %1, label %2, !llvm.loop !11 + +26: ; preds = %1, %56 + %27 = phi i32 [ 1, %1 ], [ %57, %56 ] + %28 = phi double [ %15, %1 ], [ %45, %56 ] + %29 = phi double [ %19, %1 ], [ %49, %56 ] + %30 = phi double [ %23, %1 ], [ %53, %56 ] + %31 = phi double [ %11, %1 ], [ %41, %56 ] + br label %32 + +32: ; preds = %32, %26 + %33 = phi double [ %31, %26 ], [ %41, %32 ] + %34 = phi double [ %30, %26 ], [ %53, %32 ] + %35 = phi double [ %29, %26 ], [ %49, %32 ] + %36 = phi double [ %28, %26 ], [ %45, %32 ] + %37 = phi i32 [ 0, %26 ], [ %54, %32 ] + %38 = fadd double %36, %33 + %39 = fadd double %35, %38 + %40 = fsub double %39, %34 + %41 = fmul double %40, 4.999750e-01 + %42 = fadd double %36, %41 + %43 = fsub double %42, %35 + %44 = fadd double %34, %43 + %45 = fmul double %44, 4.999750e-01 + %46 = fsub double %41, %45 + %47 = fadd double %35, %46 + %48 = fadd double %34, %47 + %49 = fmul double %48, 4.999750e-01 + %50 = fsub double %45, %41 + %51 = fadd double %50, %49 + %52 = fadd double %34, %51 + %53 = fmul double %52, 5.000000e-01 + 
%54 = add nuw nsw i32 %37, 1 + %55 = icmp eq i32 %54, 6 + br i1 %55, label %56, label %32 + +56: ; preds = %32 + %57 = add nuw nsw i32 %27, 1 + %58 = icmp eq i32 %57, 141 + br i1 %58, label %59, label %26, !llvm.loop !14 + +59: ; preds = %56 + store i32 141, i32* @i, align 4, !tbaa !9 + store double %45, double* getelementptr inbounds ([4 x double], [4 x double]* @e1, i64 0, i64 1), align 8, !tbaa !5 + store double %49, double* getelementptr inbounds ([4 x double], [4 x double]* @e1, i64 0, i64 2), align 16, !tbaa !5 + store double %53, double* getelementptr inbounds ([4 x double], [4 x double]* @e1, i64 0, i64 3), align 8, !tbaa !5 + store double %41, double* getelementptr inbounds ([4 x double], [4 x double]* @e1, i64 0, i64 0), align 16, !tbaa !5 + store i32 1, i32* @j, align 4, !tbaa !9 + store i32 3451, i32* @i, align 4, !tbaa !9 + store i32 1, i32* @j, align 4, !tbaa !9 + store i32 2, i32* @k, align 4, !tbaa !9 + store i32 3, i32* @l, align 4, !tbaa !9 + store double 6.000000e+00, double* getelementptr inbounds ([4 x double], [4 x double]* @e1, i64 0, i64 1), align 8, !tbaa !5 + store double 6.000000e+00, double* getelementptr inbounds ([4 x double], [4 x double]* @e1, i64 0, i64 0), align 16, !tbaa !5 + br label %60 + +60: ; preds = %59, %60 + %61 = phi i32 [ 1, %59 ], [ %62, %60 ] + %62 = add nuw nsw i32 %61, 1 + %63 = icmp eq i32 %62, 2101 + br i1 %63, label %64, label %60, !llvm.loop !15 + +64: ; preds = %60 + store i32 2101, i32* @i, align 4, !tbaa !9 + store i32 1, i32* @j, align 4, !tbaa !9 + store i32 2, i32* @k, align 4, !tbaa !9 + store i32 3, i32* @l, align 4, !tbaa !9 + store double 5.000000e-01, double* @y, align 8, !tbaa !5 + store double 5.000000e-01, double* @x, align 8, !tbaa !5 + store i32 1, i32* @i, align 4, !tbaa !9 + %65 = load i32, i32* @n7, align 4, !tbaa !9 + %66 = icmp slt i32 %65, 1 + br i1 %66, label %114, label %67 + +67: ; preds = %64, %67 + %68 = load double, double* @t, align 8, !tbaa !5 + %69 = load double, double* @t2, align 8, !tbaa !5 + %70 = load double, double* @x, align 8, !tbaa !5 + %71 = call double @sin(double %70) #4 + %72 = fmul double %69, %71 + %73 = load double, double* @x, align 8, !tbaa !5 + %74 = call double @cos(double %73) #4 + %75 = fmul double %72, %74 + %76 = load double, double* @x, align 8, !tbaa !5 + %77 = load double, double* @y, align 8, !tbaa !5 + %78 = fadd double %76, %77 + %79 = call double @cos(double %78) #4 + %80 = load double, double* @x, align 8, !tbaa !5 + %81 = load double, double* @y, align 8, !tbaa !5 + %82 = fsub double %80, %81 + %83 = call double @cos(double %82) #4 + %84 = fadd double %79, %83 + %85 = fadd double %84, -1.000000e+00 + %86 = fdiv double %75, %85 + %87 = call double @atan(double %86) #4 + %88 = fmul double %68, %87 + store double %88, double* @x, align 8, !tbaa !5 + %89 = load double, double* @t, align 8, !tbaa !5 + %90 = load double, double* @t2, align 8, !tbaa !5 + %91 = load double, double* @y, align 8, !tbaa !5 + %92 = call double @sin(double %91) #4 + %93 = fmul double %90, %92 + %94 = load double, double* @y, align 8, !tbaa !5 + %95 = call double @cos(double %94) #4 + %96 = fmul double %93, %95 + %97 = load double, double* @x, align 8, !tbaa !5 + %98 = load double, double* @y, align 8, !tbaa !5 + %99 = fadd double %97, %98 + %100 = call double @cos(double %99) #4 + %101 = load double, double* @x, align 8, !tbaa !5 + %102 = load double, double* @y, align 8, !tbaa !5 + %103 = fsub double %101, %102 + %104 = call double @cos(double %103) #4 + %105 = fadd double %100, %104 + %106 = fadd 
double %105, -1.000000e+00 + %107 = fdiv double %96, %106 + %108 = call double @atan(double %107) #4 + %109 = fmul double %89, %108 + store double %109, double* @y, align 8, !tbaa !5 + %110 = load i32, i32* @i, align 4, !tbaa !9 + %111 = add nsw i32 %110, 1 + store i32 %111, i32* @i, align 4, !tbaa !9 + %112 = load i32, i32* @n7, align 4, !tbaa !9 + %113 = icmp slt i32 %110, %112 + br i1 %113, label %67, label %114, !llvm.loop !16 + +114: ; preds = %67, %64 + store double 1.000000e+00, double* @z, align 8, !tbaa !5 + store double 1.000000e+00, double* @y, align 8, !tbaa !5 + store double 1.000000e+00, double* @x, align 8, !tbaa !5 + %115 = load i32, i32* @n8, align 4, !tbaa !9 + %116 = load double, double* @t, align 8 + %117 = fmul double %116, 2.000000e+00 + %118 = icmp slt i32 %115, 1 + br i1 %118, label %126, label %119 + +119: ; preds = %114 + %120 = fadd double %117, 1.000000e+00 + %121 = fmul double %116, %120 + %122 = fadd double %117, %121 + %123 = load double, double* @t2, align 8 + %124 = fdiv double %122, %123 + store double %124, double* @z, align 8, !tbaa !5 + %125 = add i32 %115, 1 + br label %126 + +126: ; preds = %119, %114 + %127 = phi i32 [ 1, %114 ], [ %125, %119 ] + store i32 %127, i32* @i, align 4, !tbaa !9 + store i32 1, i32* @j, align 4, !tbaa !9 + store i32 2, i32* @k, align 4, !tbaa !9 + store i32 3, i32* @l, align 4, !tbaa !9 + store double 1.000000e+00, double* getelementptr inbounds ([4 x double], [4 x double]* @e1, i64 0, i64 0), align 16, !tbaa !5 + store double 2.000000e+00, double* getelementptr inbounds ([4 x double], [4 x double]* @e1, i64 0, i64 1), align 8, !tbaa !5 + store double 3.000000e+00, double* getelementptr inbounds ([4 x double], [4 x double]* @e1, i64 0, i64 2), align 16, !tbaa !5 + %128 = load i32, i32* @n9, align 4, !tbaa !9 + %129 = icmp slt i32 %128, 1 + br i1 %129, label %141, label %130 + +130: ; preds = %126 + %131 = load double, double* getelementptr inbounds ([4 x double], [4 x double]* @e1, i64 0, i64 2), align 16, !tbaa !5 + %132 = load double, double* getelementptr inbounds ([4 x double], [4 x double]* @e1, i64 0, i64 3), align 8, !tbaa !5 + %133 = add i32 %128, 1 + br label %134 + +134: ; preds = %130, %134 + %135 = phi double [ %132, %130 ], [ %136, %134 ] + %136 = phi double [ %131, %130 ], [ %135, %134 ] + %137 = phi i32 [ 1, %130 ], [ %138, %134 ] + %138 = add nuw i32 %137, 1 + %139 = icmp eq i32 %137, %128 + br i1 %139, label %140, label %134, !llvm.loop !17 + +140: ; preds = %134 + store double %135, double* getelementptr inbounds ([4 x double], [4 x double]* @e1, i64 0, i64 2), align 16, !tbaa !5 + store double %136, double* getelementptr inbounds ([4 x double], [4 x double]* @e1, i64 0, i64 1), align 8, !tbaa !5 + store double %136, double* getelementptr inbounds ([4 x double], [4 x double]* @e1, i64 0, i64 3), align 8, !tbaa !5 + br label %141 + +141: ; preds = %140, %126 + %142 = phi i32 [ %133, %140 ], [ 1, %126 ] + store i32 %142, i32* @i, align 4, !tbaa !9 + store i32 2, i32* @j, align 4, !tbaa !9 + store i32 3, i32* @k, align 4, !tbaa !9 + %143 = load i32, i32* @n10, align 4, !tbaa !9 + %144 = icmp slt i32 %143, 1 + br i1 %144, label %156, label %145 + +145: ; preds = %141 + %146 = load i32, i32* @j, align 4, !tbaa !9 + %147 = load i32, i32* @k, align 4, !tbaa !9 + %148 = add i32 %143, 1 + br label %149 + +149: ; preds = %145, %149 + %150 = phi i32 [ %147, %145 ], [ %151, %149 ] + %151 = phi i32 [ %146, %145 ], [ %150, %149 ] + %152 = phi i32 [ 1, %145 ], [ %153, %149 ] + %153 = add nuw i32 %152, 1 + %154 = icmp eq 
i32 %152, %143 + br i1 %154, label %155, label %149, !llvm.loop !18 + +155: ; preds = %149 + store i32 %150, i32* @j, align 4, !tbaa !9 + store i32 %151, i32* @k, align 4, !tbaa !9 + br label %156 + +156: ; preds = %155, %141 + %157 = phi i32 [ %148, %155 ], [ 1, %141 ] + store i32 %157, i32* @i, align 4, !tbaa !9 + store double 7.500000e-01, double* @x, align 8, !tbaa !5 + store i32 1, i32* @i, align 4, !tbaa !9 + %158 = load i32, i32* @n11, align 4, !tbaa !9 + %159 = icmp slt i32 %158, 1 + br i1 %159, label %171, label %160 + +160: ; preds = %156, %160 + %161 = load double, double* @x, align 8, !tbaa !5 + %162 = call double @log(double %161) #4 + %163 = load double, double* @t1, align 8, !tbaa !5 + %164 = fdiv double %162, %163 + %165 = call double @exp(double %164) #4 + %166 = call double @sqrt(double %165) #4 + store double %166, double* @x, align 8, !tbaa !5 + %167 = load i32, i32* @i, align 4, !tbaa !9 + %168 = add nsw i32 %167, 1 + store i32 %168, i32* @i, align 4, !tbaa !9 + %169 = load i32, i32* @n11, align 4, !tbaa !9 + %170 = icmp slt i32 %167, %169 + br i1 %170, label %160, label %171, !llvm.loop !19 + +171: ; preds = %160, %156 + ret i32 0 +} + +; Function Attrs: mustprogress nofree nounwind willreturn +declare double @atan(double) local_unnamed_addr #1 + +; Function Attrs: mustprogress nofree nounwind willreturn +declare double @sin(double) local_unnamed_addr #1 + +; Function Attrs: mustprogress nofree nounwind willreturn +declare double @cos(double) local_unnamed_addr #1 + +; Function Attrs: mustprogress nofree nounwind willreturn +declare double @sqrt(double) local_unnamed_addr #1 + +; Function Attrs: mustprogress nofree nounwind willreturn +declare double @exp(double) local_unnamed_addr #1 + +; Function Attrs: mustprogress nofree nounwind willreturn +declare double @log(double) local_unnamed_addr #1 + +; Function Attrs: nofree norecurse nosync nounwind sspstrong uwtable +define dso_local i32 @pa(double* nocapture %0) local_unnamed_addr #2 { + %2 = getelementptr inbounds double, double* %0, i64 1 + %3 = getelementptr inbounds double, double* %0, i64 2 + %4 = getelementptr inbounds double, double* %0, i64 3 + %5 = load double, double* %2, align 8, !tbaa !5 + %6 = load double, double* %3, align 8, !tbaa !5 + %7 = load double, double* %4, align 8, !tbaa !5 + br label %8 + +8: ; preds = %8, %1 + %9 = phi double [ %7, %1 ], [ %32, %8 ] + %10 = phi double [ %6, %1 ], [ %27, %8 ] + %11 = phi double [ %5, %1 ], [ %23, %8 ] + %12 = phi i32 [ 0, %1 ], [ %33, %8 ] + %13 = load double, double* %0, align 8, !tbaa !5 + %14 = fadd double %13, %11 + %15 = fadd double %14, %10 + %16 = fsub double %15, %9 + %17 = load double, double* @t, align 8, !tbaa !5 + %18 = fmul double %16, %17 + store double %18, double* %0, align 8, !tbaa !5 + %19 = fadd double %11, %18 + %20 = fsub double %19, %10 + %21 = fadd double %9, %20 + %22 = load double, double* @t, align 8, !tbaa !5 + %23 = fmul double %22, %21 + %24 = fsub double %18, %23 + %25 = fadd double %10, %24 + %26 = fadd double %9, %25 + %27 = fmul double %22, %26 + %28 = fsub double %23, %18 + %29 = fadd double %28, %27 + %30 = fadd double %9, %29 + %31 = load double, double* @t2, align 8, !tbaa !5 + %32 = fdiv double %30, %31 + %33 = add nuw nsw i32 %12, 1 + %34 = icmp eq i32 %33, 6 + br i1 %34, label %35, label %8 + +35: ; preds = %8 + store double %23, double* %2, align 8, !tbaa !5 + store double %27, double* %3, align 8, !tbaa !5 + store double %32, double* %4, align 8, !tbaa !5 + ret i32 undef +} + +; Function Attrs: mustprogress nofree 
norecurse nosync nounwind sspstrong uwtable willreturn +define dso_local i32 @p3(double %0, double %1, double* nocapture %2) local_unnamed_addr #3 { + %4 = load double, double* @t, align 8, !tbaa !5 + %5 = fadd double %0, %1 + %6 = fmul double %5, %4 + %7 = fadd double %6, %1 + %8 = fmul double %4, %7 + %9 = fadd double %6, %8 + %10 = load double, double* @t2, align 8, !tbaa !5 + %11 = fdiv double %9, %10 + store double %11, double* %2, align 8, !tbaa !5 + ret i32 undef +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind sspstrong uwtable willreturn +define dso_local i32 @p0() local_unnamed_addr #3 { + %1 = load i32, i32* @k, align 4, !tbaa !9 + %2 = sext i32 %1 to i64 + %3 = getelementptr inbounds [4 x double], [4 x double]* @e1, i64 0, i64 %2 + %4 = load double, double* %3, align 8, !tbaa !5 + %5 = load i32, i32* @j, align 4, !tbaa !9 + %6 = sext i32 %5 to i64 + %7 = getelementptr inbounds [4 x double], [4 x double]* @e1, i64 0, i64 %6 + store double %4, double* %7, align 8, !tbaa !5 + %8 = load i32, i32* @l, align 4, !tbaa !9 + %9 = sext i32 %8 to i64 + %10 = getelementptr inbounds [4 x double], [4 x double]* @e1, i64 0, i64 %9 + %11 = load double, double* %10, align 8, !tbaa !5 + store double %11, double* %3, align 8, !tbaa !5 + %12 = load double, double* %7, align 8, !tbaa !5 + store double %12, double* %10, align 8, !tbaa !5 + ret i32 undef +} + +attributes #0 = { nofree nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #1 = { mustprogress nofree nounwind willreturn "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #2 = { nofree norecurse nosync nounwind sspstrong uwtable "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #3 = { mustprogress nofree norecurse nosync nounwind sspstrong uwtable willreturn "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" } +attributes #4 = { nounwind } + +!llvm.module.flags = !{!0, !1, !2, !3} +!llvm.ident = !{!4} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{i32 7, !"PIE Level", i32 2} +!3 = !{i32 7, !"uwtable", i32 1} +!4 = !{!"clang version 13.0.1"} +!5 = !{!6, !6, i64 0} +!6 = !{!"double", !7, i64 0} +!7 = !{!"omnipotent char", !8, i64 0} +!8 = !{!"Simple C/C++ TBAA"} +!9 = !{!10, !10, i64 0} +!10 = !{!"int", !7, i64 0} +!11 = distinct !{!11, !12, !13} +!12 = !{!"llvm.loop.mustprogress"} +!13 = !{!"llvm.loop.unroll.disable"} +!14 = distinct !{!14, !12, !13} +!15 = distinct !{!15, !12, !13} +!16 = distinct !{!16, !12, !13} +!17 = distinct !{!17, !12, !13} +!18 = distinct !{!18, !12, !13} +!19 = distinct !{!19, !12, !13}